iommu/amd: Create a list of reserved iova addresses
authorJoerg Roedel <jroedel@suse.de>
Thu, 7 Jul 2016 16:01:10 +0000 (18:01 +0200)
committerJoerg Roedel <jroedel@suse.de>
Wed, 13 Jul 2016 10:46:05 +0000 (12:46 +0200)
Put the MSI range, the HT range and the MMIO ranges of PCI
devices into that list, so that these addresses are not
allocated for DMA.

Copy this address list into every created dma_ops_domain.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/amd_iommu.c

index e7825b25c62fbcceaee43264ad39e2e2d36b8a72..1bb59ae2d586ad1ea6c55d62ac0e52443880bdab 100644 (file)
 #define IOVA_PFN(addr)         ((addr) >> PAGE_SHIFT)
 #define DMA_32BIT_PFN          IOVA_PFN(DMA_BIT_MASK(32))
 
+/*
+ * Reserved IOVA ranges: address windows that must never be handed out
+ * as DMA addresses.  The 0xfee00000-0xfeefffff window is the x86
+ * MSI/interrupt address range; the 0xfd00000000-0xffffffffff window is
+ * the HyperTransport reserved region on AMD systems.
+ */
+#define MSI_RANGE_START                (0xfee00000)
+#define MSI_RANGE_END          (0xfeefffff)
+#define HT_RANGE_START         (0xfd00000000ULL)
+#define HT_RANGE_END           (0xffffffffffULL)
+
 /*
  * This bitmap is used to advertise the page sizes our hardware support
  * to the IOMMU core, which will then use this information to split
@@ -169,6 +175,9 @@ struct dma_ops_domain {
        struct iova_domain iovad;
 };
 
+static struct iova_domain reserved_iova_ranges;
+static struct lock_class_key reserved_rbtree_key;
+
 /****************************************************************************
  *
  * Helper functions
@@ -2058,6 +2067,9 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
        init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
                         IOVA_START_PFN, DMA_32BIT_PFN);
 
+       /* Initialize reserved ranges */
+       copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
+
        return dma_dom;
 
 free_dma_dom:
@@ -2963,6 +2975,59 @@ static struct dma_map_ops amd_iommu_dma_ops = {
        .set_dma_mask   = set_dma_mask,
 };
 
+/*
+ * Populate the global reserved_iova_ranges domain with every address
+ * window that must never be used as a DMA address: the MSI window, the
+ * HyperTransport window and the MMIO resources of all PCI devices.
+ * The result is later copied into each newly created dma_ops_domain.
+ *
+ * Returns 0 on success, -ENOMEM when a reservation fails.
+ */
+static int init_reserved_iova_ranges(void)
+{
+       struct pci_dev *pdev = NULL;
+       struct iova *val;
+
+       init_iova_domain(&reserved_iova_ranges, PAGE_SIZE,
+                        IOVA_START_PFN, DMA_32BIT_PFN);
+
+       /*
+        * Give this rb-tree lock its own lockdep class: it nests
+        * differently than the per-domain iova locks.
+        */
+       lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
+                         &reserved_rbtree_key);
+
+       /* MSI memory range */
+       val = reserve_iova(&reserved_iova_ranges,
+                          IOVA_PFN(MSI_RANGE_START), IOVA_PFN(MSI_RANGE_END));
+       if (!val) {
+               pr_err("Reserving MSI range failed\n");
+               return -ENOMEM;
+       }
+
+       /* HT memory range */
+       val = reserve_iova(&reserved_iova_ranges,
+                          IOVA_PFN(HT_RANGE_START), IOVA_PFN(HT_RANGE_END));
+       if (!val) {
+               pr_err("Reserving HT range failed\n");
+               return -ENOMEM;
+       }
+
+       /*
+        * Memory used for PCI resources
+        * FIXME: Check whether we can reserve the PCI-hole completely
+        */
+       for_each_pci_dev(pdev) {
+               int i;
+
+               for (i = 0; i < PCI_NUM_RESOURCES; ++i) {
+                       struct resource *r = &pdev->resource[i];
+
+                       if (!(r->flags & IORESOURCE_MEM))
+                               continue;
+
+                       val = reserve_iova(&reserved_iova_ranges,
+                                          IOVA_PFN(r->start),
+                                          IOVA_PFN(r->end));
+                       if (!val) {
+                               pr_err("Reserve pci-resource range failed\n");
+                               /*
+                                * for_each_pci_dev() holds a reference on
+                                * pdev; drop it before the early return to
+                                * avoid leaking the device refcount.
+                                */
+                               pci_dev_put(pdev);
+                               return -ENOMEM;
+                       }
+               }
+       }
+
+       return 0;
+}
+
 int __init amd_iommu_init_api(void)
 {
        int ret, err = 0;
@@ -2971,6 +3036,10 @@ int __init amd_iommu_init_api(void)
        if (ret)
                return ret;
 
+       ret = init_reserved_iova_ranges();
+       if (ret)
+               return ret;
+
        err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
        if (err)
                return err;