Merge tag 'dma-mapping-5.3-1' of git://git.infradead.org/users/hch/dma-mapping
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index e0df96fdfe46c70e1a83e78b4d7056cd0f61ad34..fece30ca8b0cb9fc58d6bb65f2c5a2bc5f4b2b03 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
 #include <linux/dma-direct.h>
 #include <linux/swiotlb.h>
 #include <linux/mem_encrypt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -41,7 +45,7 @@ EXPORT_SYMBOL_GPL(sev_enable_key);
 bool sev_enabled __section(.data);
 
 /* Buffer used for early in-place encryption by BSP, no locking needed */
-static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
+static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);
 
 /*
  * This routine does not change the underlying encryption setting of the
@@ -348,6 +352,32 @@ bool sev_active(void)
 }
 EXPORT_SYMBOL(sev_active);
 
+/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+bool force_dma_unencrypted(struct device *dev)
+{
+       /*
+        * For SEV, all DMA must be to unencrypted addresses.
+        */
+       if (sev_active())
+               return true;
+
+       /*
+        * For SME, all DMA must be to unencrypted addresses if the
+        * device does not support DMA to addresses that include the
+        * encryption mask.
+        */
+       if (sme_active()) {
+               u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
+               u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
+                                               dev->bus_dma_mask);
+
+               if (dma_dev_mask <= dma_enc_mask)
+                       return true;
+       }
+
+       return false;
+}
+
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_free_decrypted_mem(void)
 {
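
The SME branch of force_dma_unencrypted() boils down to one comparison: the largest address a device can reach (its DMA mask) against the largest address that fits below the encryption bit. The sketch below is a minimal userspace model of that comparison, not kernel code: it assumes the C-bit sits at bit 47 (the real position comes from CPUID 0x8000001F EBX[5:0]), re-implements DMA_BIT_MASK() and __ffs64() stand-ins so it compiles outside the kernel, and uses a hypothetical helper name model_force_dma_unencrypted().

/*
 * Userspace model of the SME mask check in force_dma_unencrypted().
 * Assumption: the C-bit is bit 47; DMA_BIT_MASK()/ffs64() below are
 * local stand-ins mirroring the kernel definitions so the example
 * builds with a plain C compiler.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Index of the lowest set bit; caller must not pass 0. */
static unsigned int ffs64(uint64_t x)
{
	return __builtin_ctzll(x);
}

static bool model_force_dma_unencrypted(uint64_t sme_me_mask,
					uint64_t dev_dma_mask)
{
	/* Highest physical address reachable without setting the C-bit. */
	uint64_t dma_enc_mask = DMA_BIT_MASK(ffs64(sme_me_mask));

	/* Device cannot express the C-bit: DMA must be unencrypted. */
	return dev_dma_mask <= dma_enc_mask;
}

int main(void)
{
	uint64_t sme_me_mask = 1ULL << 47;	/* assumed C-bit position */

	/* A 30-bit device cannot address pages with the C-bit set. */
	printf("30-bit device -> unencrypted DMA: %d\n",
	       model_force_dma_unencrypted(sme_me_mask, DMA_BIT_MASK(30)));

	/* A 64-bit device can, so encrypted DMA is fine. */
	printf("64-bit device -> unencrypted DMA: %d\n",
	       model_force_dma_unencrypted(sme_me_mask, DMA_BIT_MASK(64)));

	return 0;
}

In the kernel itself the hook is consumed by dma-direct: when force_dma_unencrypted() returns true, coherent allocations have their encryption attribute cleared via set_memory_decrypted() and the unencrypted address is handed to the device, while streaming DMA for such devices is bounced through the already-unencrypted SWIOTLB buffer.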