x86/mm: Add DMA support for SEV memory encryption
authorTom Lendacky <thomas.lendacky@amd.com>
Fri, 20 Oct 2017 14:30:53 +0000 (09:30 -0500)
committerThomas Gleixner <tglx@linutronix.de>
Tue, 7 Nov 2017 14:35:58 +0000 (15:35 +0100)
DMA access to encrypted memory cannot be performed when SEV is active.
For DMA to work properly under SEV, the SWIOTLB bounce buffers must be
used.
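
As a rough illustration (not part of this patch), no driver changes are
needed to pick up the new behaviour: once mem_encrypt_init() installs
sev_dma_ops, an ordinary coherent allocation is routed through
sev_alloc(), which either returns pages marked decrypted or falls back
to the SWIOTLB pool. The example_setup_ring() helper below is purely
hypothetical and only sketches the driver-side call:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical driver helper; shown only to illustrate the call path. */
static int example_setup_ring(struct device *dev, size_t ring_size,
			      void **ring_virt, dma_addr_t *ring_dma)
{
	/* Served by the installed dma_ops (sev_dma_ops when SEV is active). */
	*ring_virt = dma_alloc_coherent(dev, ring_size, ring_dma, GFP_KERNEL);
	if (!*ring_virt)
		return -ENOMEM;

	/*
	 * The buffer is now device-accessible: either the encryption bit
	 * has been cleared in *ring_dma, or the allocation came from the
	 * already-decrypted SWIOTLB area.
	 */
	return 0;
}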

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Tested-by: Borislav Petkov <bp@suse.de>
Cc: kvm@vger.kernel.org
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Link: https://lkml.kernel.org/r/20171020143059.3291-12-brijesh.singh@amd.com
arch/x86/mm/mem_encrypt.c
lib/swiotlb.c

index add836d3d174de3a273fa7e29c88ad96f5a6e065..e8bfad75ba29f98bbb2734ffcdba2c002361cbaa 100644 (file)
@@ -192,6 +192,70 @@ void __init sme_early_init(void)
        /* Update the protection map with memory encryption mask */
        for (i = 0; i < ARRAY_SIZE(protection_map); i++)
                protection_map[i] = pgprot_encrypted(protection_map[i]);
+
+       if (sev_active())
+               swiotlb_force = SWIOTLB_FORCE;
+}
+
+static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+                      gfp_t gfp, unsigned long attrs)
+{
+       unsigned long dma_mask;
+       unsigned int order;
+       struct page *page;
+       void *vaddr = NULL;
+
+       dma_mask = dma_alloc_coherent_mask(dev, gfp);
+       order = get_order(size);
+
+       /*
+        * Memory will be memset to zero after marking decrypted, so don't
+        * bother clearing it before.
+        */
+       gfp &= ~__GFP_ZERO;
+
+       page = alloc_pages_node(dev_to_node(dev), gfp, order);
+       if (page) {
+               dma_addr_t addr;
+
+               /*
+                * Since we will be clearing the encryption bit, check the
+                * mask with it already cleared.
+                */
+               addr = __sme_clr(phys_to_dma(dev, page_to_phys(page)));
+               if ((addr + size) > dma_mask) {
+                       __free_pages(page, get_order(size));
+               } else {
+                       vaddr = page_address(page);
+                       *dma_handle = addr;
+               }
+       }
+
+       if (!vaddr)
+               vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+
+       if (!vaddr)
+               return NULL;
+
+       /* Clear the SME encryption bit for DMA use if not swiotlb area */
+       if (!is_swiotlb_buffer(dma_to_phys(dev, *dma_handle))) {
+               set_memory_decrypted((unsigned long)vaddr, 1 << order);
+               memset(vaddr, 0, PAGE_SIZE << order);
+               *dma_handle = __sme_clr(*dma_handle);
+       }
+
+       return vaddr;
+}
+
+static void sev_free(struct device *dev, size_t size, void *vaddr,
+                    dma_addr_t dma_handle, unsigned long attrs)
+{
+       /* Set the SME encryption bit for re-use if not swiotlb area */
+       if (!is_swiotlb_buffer(dma_to_phys(dev, dma_handle)))
+               set_memory_encrypted((unsigned long)vaddr,
+                                    1 << get_order(size));
+
+       swiotlb_free_coherent(dev, size, vaddr, dma_handle);
 }
 
 /*
@@ -218,6 +282,20 @@ bool sev_active(void)
 }
 EXPORT_SYMBOL_GPL(sev_active);
 
+static const struct dma_map_ops sev_dma_ops = {
+       .alloc                  = sev_alloc,
+       .free                   = sev_free,
+       .map_page               = swiotlb_map_page,
+       .unmap_page             = swiotlb_unmap_page,
+       .map_sg                 = swiotlb_map_sg_attrs,
+       .unmap_sg               = swiotlb_unmap_sg_attrs,
+       .sync_single_for_cpu    = swiotlb_sync_single_for_cpu,
+       .sync_single_for_device = swiotlb_sync_single_for_device,
+       .sync_sg_for_cpu        = swiotlb_sync_sg_for_cpu,
+       .sync_sg_for_device     = swiotlb_sync_sg_for_device,
+       .mapping_error          = swiotlb_dma_mapping_error,
+};
+
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void)
 {
@@ -227,6 +305,14 @@ void __init mem_encrypt_init(void)
        /* Call into SWIOTLB to update the SWIOTLB DMA buffers */
        swiotlb_update_mem_attributes();
 
+       /*
+        * With SEV, DMA operations cannot use encryption. New DMA ops
+        * are required in order to mark the DMA areas as decrypted or
+        * to use bounce buffers.
+        */
+       if (sev_active())
+               dma_ops = &sev_dma_ops;
+
        pr_info("AMD Secure Memory Encryption (SME) active\n");
 }
 
index 8c6c83ef57a43336e0a33a52f691e3323eb8f3f4..cea19aaf303c9f3c558dd8246205ed39f38dcc46 100644 (file)
@@ -507,8 +507,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
        if (no_iotlb_memory)
                panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
 
-       if (sme_active())
-               pr_warn_once("SME is active and system is using DMA bounce buffers\n");
+       if (mem_encrypt_active())
+               pr_warn_once("%s is active and system is using DMA bounce buffers\n",
+                            sme_active() ? "SME" : "SEV");
 
        mask = dma_get_seg_boundary(hwdev);