Merge tag 'iommu-updates-v4.21' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index eb5351e8cde5370cad92109e6ec5298a726dd2d4..2bd9ac285c0dee363fe9b9feecf2f9082e49ebbe 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -184,6 +184,7 @@ static int rwbf_quirk;
  */
 static int force_on = 0;
 int intel_iommu_tboot_noforce;
+static int no_platform_optin;
 
 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
 
@@ -441,6 +442,7 @@ static int __init intel_iommu_setup(char *str)
                        pr_info("IOMMU enabled\n");
                } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
+                       no_platform_optin = 1;
                        pr_info("IOMMU disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
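
For context, a minimal userspace sketch of the strncmp-based token parsing used by intel_iommu_setup() above; the globals here are stand-ins for the kernel's dmar_disabled and no_platform_optin, and the point of the hunk is that "off" now also records an explicit opt-out so the firmware's platform opt-in cannot override it:

#include <stdio.h>
#include <string.h>

static int dmar_disabled;
static int no_platform_optin;

static void parse_intel_iommu_param(const char *str)
{
	if (!strncmp(str, "on", 2)) {
		dmar_disabled = 0;
		printf("IOMMU enabled\n");
	} else if (!strncmp(str, "off", 3)) {
		/* Record the explicit opt-out so that platform opt-in
		 * (see platform_optin_force_iommu() below) is ignored. */
		dmar_disabled = 1;
		no_platform_optin = 1;
		printf("IOMMU disabled\n");
	}
}

int main(void)
{
	parse_intel_iommu_param("off");
	return 0;
}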
@@ -1403,7 +1405,8 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
        if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
                info->pri_enabled = 1;
 #endif
-       if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
+       if (!pdev->untrusted && info->ats_supported &&
+           !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
                info->ats_enabled = 1;
                domain_update_iotlb(info->domain);
                info->ats_qdep = pci_ats_queue_depth(pdev);
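
The hunk above gates ATS (Address Translation Services) on pdev->untrusted: a device behind an externally exposed port, such as Thunderbolt, must not be allowed to cache IOVA-to-physical translations that it could later replay. A compilable sketch of the predicate, with simplified stand-in types rather than the real struct pci_dev / struct device_domain_info:

#include <stdbool.h>
#include <stdio.h>

struct pci_dev_stub { bool untrusted; };
struct domain_info_stub { bool ats_supported; };

/* ATS may be enabled only when the device supports it AND the platform
 * has not flagged it as untrusted (externally attached, hot-pluggable). */
static bool may_enable_ats(const struct pci_dev_stub *pdev,
			   const struct domain_info_stub *info)
{
	return !pdev->untrusted && info->ats_supported;
}

int main(void)
{
	struct pci_dev_stub tbt_dev = { .untrusted = true };
	struct domain_info_stub info = { .ats_supported = true };

	printf("enable ATS: %d\n", may_enable_ats(&tbt_dev, &info)); /* 0 */
	return 0;
}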
@@ -2937,6 +2940,13 @@ static int iommu_should_identity_map(struct device *dev, int startup)
                if (device_is_rmrr_locked(dev))
                        return 0;
 
+               /*
+                * Prevent any device marked as untrusted from getting
+                * placed into the statically identity mapping domain.
+                */
+               if (pdev->untrusted)
+                       return 0;
+
                if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
                        return 1;
 
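Ordering matters in iommu_should_identity_map(): the untrusted check sits before the Azalia/IDENTMAP quirks, so no whitelist can pull an untrusted device back into the pass-through domain, where it would see all of physical memory. A hedged sketch of that early-return policy chain (stand-in names, not the kernel function):

#include <stdbool.h>

struct dev_stub {
	bool rmrr_locked;
	bool untrusted;
	bool azalia_quirk;
};

/* Returns 1 if the device may use the static 1:1 (pass-through) mapping.
 * Rejections come first: a later quirk must not override them. */
static int should_identity_map_stub(const struct dev_stub *dev)
{
	if (dev->rmrr_locked)
		return 0;
	if (dev->untrusted)	/* never expose all of physical memory */
		return 0;
	if (dev->azalia_quirk)
		return 1;
	return 0;
}

int main(void)
{
	struct dev_stub d = { .untrusted = true, .azalia_quirk = true };

	return should_identity_map_stub(&d);	/* 0: untrusted wins */
}
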
@@ -3639,9 +3649,11 @@ static int iommu_no_mapping(struct device *dev)
        return 0;
 }
 
-static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
-                                    size_t size, int dir, u64 dma_mask)
+static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
+                                  unsigned long offset, size_t size, int dir,
+                                  u64 dma_mask)
 {
+       phys_addr_t paddr = page_to_phys(page) + offset;
        struct dmar_domain *domain;
        phys_addr_t start_paddr;
        unsigned long iova_pfn;
@@ -3657,7 +3669,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
        domain = get_valid_domain_for_dev(dev);
        if (!domain)
-               return 0;
+               return DMA_MAPPING_ERROR;
 
        iommu = domain_get_iommu(domain);
        size = aligned_nrpages(paddr, size);
@@ -3695,7 +3707,7 @@ error:
                free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
        pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
                dev_name(dev), size, (unsigned long long)paddr, dir);
-       return 0;
+       return DMA_MAPPING_ERROR;
 }
 
 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
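
Two things change in the hunk above: the helper now takes a page/offset pair instead of a precomputed physical address, and failure is reported as DMA_MAPPING_ERROR rather than 0, since bus address 0 can be a legitimate mapping. A self-contained sketch of the sentinel convention (the DMA_MAPPING_ERROR definition matches include/linux/dma-mapping.h; everything else is a stand-in):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;
/* All-ones sentinel: unlike 0, it can never be a valid bus address. */
#define DMA_MAPPING_ERROR	(~(dma_addr_t)0)

static dma_addr_t map_page_stub(uint64_t page_phys, unsigned long offset,
				int no_domain)
{
	if (no_domain)			/* e.g. no IOMMU domain or IOVA left */
		return DMA_MAPPING_ERROR;
	return page_phys + offset;	/* identity translation, sketch only */
}

int main(void)
{
	dma_addr_t handle = map_page_stub(0x1000, 0x80, 0);

	if (handle == DMA_MAPPING_ERROR)  /* explicit comparison, not !handle */
		fprintf(stderr, "mapping failed\n");
	else
		printf("bus addr 0x%llx\n", (unsigned long long)handle);
	return 0;
}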
@@ -3703,8 +3715,7 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
                                 enum dma_data_direction dir,
                                 unsigned long attrs)
 {
-       return __intel_map_single(dev, page_to_phys(page) + offset, size,
-                                 dir, *dev->dma_mask);
+       return __intel_map_page(dev, page, offset, size, dir, *dev->dma_mask);
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3795,10 +3806,9 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
                return NULL;
        memset(page_address(page), 0, size);
 
-       *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
-                                        DMA_BIDIRECTIONAL,
-                                        dev->coherent_dma_mask);
-       if (*dma_handle)
+       *dma_handle = __intel_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL,
+                                      dev->coherent_dma_mask);
+       if (*dma_handle != DMA_MAPPING_ERROR)
                return page_address(page);
        if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
                __free_pages(page, order);
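
intel_alloc_coherent() accordingly switches from truth-testing the handle to comparing against the sentinel, keeping its unwind path intact. A minimal sketch of that allocate-map-check-unwind shape (stand-in names, userspace allocation in place of the kernel page allocator):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef uint64_t dma_addr_t;
#define DMA_MAPPING_ERROR	(~(dma_addr_t)0)

/* Pretend mapping always fails, to exercise the unwind path. */
static dma_addr_t map_buffer_stub(void *buf)
{
	(void)buf;
	return DMA_MAPPING_ERROR;
}

static void *alloc_coherent_stub(size_t size, dma_addr_t *dma_handle)
{
	void *buf = malloc(size);

	if (!buf)
		return NULL;
	memset(buf, 0, size);			/* zeroed, as in the hunk above */

	*dma_handle = map_buffer_stub(buf);
	if (*dma_handle != DMA_MAPPING_ERROR)	/* sentinel, not truth test */
		return buf;

	free(buf);				/* unwind: release the pages */
	return NULL;
}

int main(void)
{
	dma_addr_t handle;
	void *p = alloc_coherent_stub(4096, &handle);

	return p ? 0 : 1;
}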
@@ -3907,11 +3917,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
        return nelems;
 }
 
-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-       return !dma_addr;
-}
-
 static const struct dma_map_ops intel_dma_ops = {
        .alloc = intel_alloc_coherent,
        .free = intel_free_coherent,
@@ -3919,7 +3924,6 @@ static const struct dma_map_ops intel_dma_ops = {
        .unmap_sg = intel_unmap_sg,
        .map_page = intel_map_page,
        .unmap_page = intel_unmap_page,
-       .mapping_error = intel_mapping_error,
        .dma_supported = dma_direct_supported,
 };
 
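With every map_page/map_sg implementation returning the common sentinel, the per-driver .mapping_error callback becomes dead weight and is dropped from intel_dma_ops; the DMA core can test the handle directly. A sketch of what the generic check reduces to (a simplified stand-in for the core helper dma_mapping_error()):

#include <stdint.h>

typedef uint64_t dma_addr_t;
#define DMA_MAPPING_ERROR	(~(dma_addr_t)0)

/* One comparison replaces an indirect call through dma_map_ops. */
static inline int dma_mapping_error_stub(dma_addr_t dma_addr)
{
	return dma_addr == DMA_MAPPING_ERROR;
}

int main(void)
{
	return dma_mapping_error_stub(DMA_MAPPING_ERROR) ? 0 : 1;
}
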
@@ -4770,14 +4774,54 @@ const struct attribute_group *intel_iommu_groups[] = {
        NULL,
 };
 
+static int __init platform_optin_force_iommu(void)
+{
+       struct pci_dev *pdev = NULL;
+       bool has_untrusted_dev = false;
+
+       if (!dmar_platform_optin() || no_platform_optin)
+               return 0;
+
+       for_each_pci_dev(pdev) {
+               if (pdev->untrusted) {
+                       has_untrusted_dev = true;
+                       break;
+               }
+       }
+
+       if (!has_untrusted_dev)
+               return 0;
+
+       if (no_iommu || dmar_disabled)
+               pr_info("Intel-IOMMU force enabled due to platform opt in\n");
+
+       /*
+        * If Intel-IOMMU is disabled by default, we will apply identity
+        * map for all devices except those marked as being untrusted.
+        */
+       if (dmar_disabled)
+               iommu_identity_mapping |= IDENTMAP_ALL;
+
+       dmar_disabled = 0;
+#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
+       swiotlb = 0;
+#endif
+       no_iommu = 0;
+
+       return 1;
+}
+
 int __init intel_iommu_init(void)
 {
        int ret = -ENODEV;
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
 
-       /* VT-d is required for a TXT/tboot launch, so enforce that */
-       force_on = tboot_force_iommu();
+       /*
+        * Intel IOMMU is required for a TXT/tboot launch or platform
+        * opt in, so enforce that.
+        */
+       force_on = tboot_force_iommu() || platform_optin_force_iommu();
 
        if (iommu_init_mempool()) {
                if (force_on)
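
Putting the new pieces together: platform_optin_force_iommu() honours the DMAR table's opt-in flag only if the administrator did not pass intel_iommu=off, and only bothers forcing the IOMMU on when an untrusted device actually exists. A userspace sketch of that decision (the three booleans stand in for dmar_platform_optin(), no_platform_optin and the for_each_pci_dev() scan):

#include <stdbool.h>
#include <stdio.h>

static bool force_iommu_stub(bool platform_opt_in, bool user_opted_out,
			     bool has_untrusted_dev)
{
	/* Firmware opt-in is honoured only absent an explicit user opt-out. */
	if (!platform_opt_in || user_opted_out)
		return false;
	/* No untrusted device present: nothing to protect against, so the
	 * user's (or default) choice stands. */
	return has_untrusted_dev;
}

int main(void)
{
	printf("%d\n", force_iommu_stub(true, false, true));	/* 1: forced */
	printf("%d\n", force_iommu_stub(true, true, true));	/* 0: user off */
	return 0;
}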