Merge tag 'dma-mapping-5.3-1' of git://git.infradead.org/users/hch/dma-mapping
[sfrench/cifs-2.6.git] / arch / x86 / mm / mem_encrypt.c
index e94e0a62ba92d6fb6bce253374d7a0c283affd22..fece30ca8b0cb9fc58d6bb65f2c5a2bc5f4b2b03 100644 (file)
 #include <linux/dma-direct.h>
 #include <linux/swiotlb.h>
 #include <linux/mem_encrypt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -348,6 +352,32 @@ bool sev_active(void)
 }
 EXPORT_SYMBOL(sev_active);
 
+/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+bool force_dma_unencrypted(struct device *dev)
+{
+       /*
+        * For SEV, all DMA must be to unencrypted addresses.
+        */
+       if (sev_active())
+               return true;
+
+       /*
+        * For SME, force unencrypted DMA when the device's usable DMA
+        * mask cannot reach the encryption bit: __ffs64(sme_me_mask) is
+        * the C-bit position, so a mask within DMA_BIT_MASK() of that
+        * position can never produce an address with the C-bit set.
+        */
+       if (sme_active()) {
+               u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
+               u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
+                                               dev->bus_dma_mask);
+
+               if (dma_dev_mask <= dma_enc_mask)
+                       return true;
+       }
+
+       return false;
+}
+
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_free_decrypted_mem(void)
 {