Merge branches 'arm/omap', 'arm/exynos', 'arm/smmu', 'arm/mediatek', 'arm/qcom',...
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 12d094d08c0a2e982fb712b9310ceadf7213013d..87de0b975672b0a8864277ff799b5e02e56547ea 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
 #include <linux/dma-direct.h>
 #include <linux/crash_dump.h>
 #include <linux/numa.h>
+#include <linux/swiotlb.h>
 #include <asm/irq_remapping.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
+#include <trace/events/intel_iommu.h>
 
 #include "irq_remapping.h"
 #include "intel-pasid.h"
 
 #include "irq_remapping.h"
 #include "intel-pasid.h"
@@ -339,11 +341,15 @@ static void domain_exit(struct dmar_domain *domain);
 static void domain_remove_dev_info(struct dmar_domain *domain);
 static void dmar_remove_one_dev_info(struct device *dev);
 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
+static void domain_context_clear(struct intel_iommu *iommu,
+                                struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu);
 static bool device_is_rmrr_locked(struct device *dev);
 static int intel_iommu_attach_device(struct iommu_domain *domain,
                                     struct device *dev);
+static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
+                                           dma_addr_t iova);
 
 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
 int dmar_disabled = 0;
@@ -360,6 +366,7 @@ static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 static int iommu_identity_mapping;
+static int intel_no_bounce;
 
 #define IDENTMAP_ALL           1
 #define IDENTMAP_GFX           2
@@ -373,6 +380,9 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
+#define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) &&   \
+                               to_pci_dev(d)->untrusted)
+
 /*
  * Iterate over elements in device_domain_list and call the specified
  * callback @fn against each element.
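
The device_needs_bounce() macro added above gates the bounce-page path on two conditions: the administrator has not passed intel_iommu=nobounce, and the device is a PCI device flagged as untrusted. A standalone C sketch of that predicate (userspace code with a minimal stand-in for struct pci_dev; illustrative only, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

static int intel_no_bounce;             /* set by intel_iommu=nobounce */

/* Minimal stand-in for struct pci_dev; only the flag we need here. */
struct fake_pci_dev {
        bool untrusted;
};

/* Mirrors the device_needs_bounce() test: bounce only when bouncing is
 * not disabled and the (PCI) device is marked untrusted. */
static bool device_needs_bounce(const struct fake_pci_dev *pdev)
{
        return !intel_no_bounce && pdev && pdev->untrusted;
}

int main(void)
{
        struct fake_pci_dev external = { .untrusted = true  };
        struct fake_pci_dev internal = { .untrusted = false };

        printf("external device: %s\n",
               device_needs_bounce(&external) ? "bounce dma_ops" : "default dma_ops");
        printf("internal device: %s\n",
               device_needs_bounce(&internal) ? "bounce dma_ops" : "default dma_ops");
        return 0;
}
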
@@ -455,6 +465,9 @@ static int __init intel_iommu_setup(char *str)
                        printk(KERN_INFO
                                "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
                        intel_iommu_tboot_noforce = 1;
+               } else if (!strncmp(str, "nobounce", 8)) {
+                       pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n");
+                       intel_no_bounce = 1;
                }
 
                str += strcspn(str, ",");
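
The new nobounce token is matched with the same strncmp()/strcspn() walk that intel_iommu_setup() uses for its other comma-separated sub-options. A standalone sketch of that parsing pattern (userspace C; the step over the comma itself is assumed here, since the hunk does not show it):

#include <stdio.h>
#include <string.h>

static int intel_iommu_strict;
static int intel_no_bounce;

/* Illustrative stand-in for intel_iommu_setup(): match each token with
 * strncmp(), then jump to the next comma with strcspn(). */
static void parse_intel_iommu_opts(const char *str)
{
        while (*str) {
                if (!strncmp(str, "strict", 6))
                        intel_iommu_strict = 1;
                else if (!strncmp(str, "nobounce", 8))
                        intel_no_bounce = 1;

                str += strcspn(str, ",");       /* skip to the next ',' */
                while (*str == ',')             /* assumed: step past it */
                        str++;
        }
}

int main(void)
{
        parse_intel_iommu_opts("strict,nobounce");
        printf("strict=%d nobounce=%d\n", intel_iommu_strict, intel_no_bounce);
        return 0;
}
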
@@ -2105,9 +2118,26 @@ out_unlock:
        return ret;
 }
 
+struct domain_context_mapping_data {
+       struct dmar_domain *domain;
+       struct intel_iommu *iommu;
+       struct pasid_table *table;
+};
+
+static int domain_context_mapping_cb(struct pci_dev *pdev,
+                                    u16 alias, void *opaque)
+{
+       struct domain_context_mapping_data *data = opaque;
+
+       return domain_context_mapping_one(data->domain, data->iommu,
+                                         data->table, PCI_BUS_NUM(alias),
+                                         alias & 0xff);
+}
+
 static int
 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 {
+       struct domain_context_mapping_data data;
        struct pasid_table *table;
        struct intel_iommu *iommu;
        u8 bus, devfn;
@@ -2117,7 +2147,17 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
                return -ENODEV;
 
        table = intel_pasid_get_table(dev);
-       return domain_context_mapping_one(domain, iommu, table, bus, devfn);
+
+       if (!dev_is_pci(dev))
+               return domain_context_mapping_one(domain, iommu, table,
+                                                 bus, devfn);
+
+       data.domain = domain;
+       data.iommu = iommu;
+       data.table = table;
+
+       return pci_for_each_dma_alias(to_pci_dev(dev),
+                                     &domain_context_mapping_cb, &data);
 }
 
 static int domain_context_mapped_cb(struct pci_dev *pdev,
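
domain_context_mapping_cb() above (like domain_context_clear_one_cb() later in the patch) receives each DMA alias as a packed 16-bit requester ID and splits it with PCI_BUS_NUM(alias) and alias & 0xff. A standalone sketch of that decoding, with the PCI helper macros re-defined locally so it builds outside the kernel (the alias value is made up):

#include <stdint.h>
#include <stdio.h>

/* Local copies of the kernel's PCI helpers so this builds standalone. */
#define PCI_BUS_NUM(x)   (((x) >> 8) & 0xff)     /* upper byte: bus number */
#define PCI_SLOT(devfn)  (((devfn) >> 3) & 0x1f) /* devfn bits 7..3        */
#define PCI_FUNC(devfn)  ((devfn) & 0x07)        /* devfn bits 2..0        */

int main(void)
{
        uint16_t alias = 0x0310;             /* made-up requester ID */
        unsigned int bus = PCI_BUS_NUM(alias);
        unsigned int devfn = alias & 0xff;   /* as in the callbacks  */

        printf("alias %04x -> %02x:%02x.%x\n",
               alias, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        return 0;
}
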
@@ -3267,7 +3307,7 @@ static int __init init_dmars(void)
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
        }
 
-       if (iommu_pass_through)
+       if (iommu_default_passthrough())
                iommu_identity_mapping |= IDENTMAP_ALL;
 
 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
@@ -3505,6 +3545,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
        start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
        start_paddr += paddr & ~PAGE_MASK;
+
+       trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT);
+
        return start_paddr;
 
 error:
@@ -3560,10 +3603,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
        if (dev_is_pci(dev))
                pdev = to_pci_dev(dev);
 
-       dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
-
        freelist = domain_unmap(domain, start_pfn, last_pfn);
-
        if (intel_iommu_strict || (pdev && pdev->untrusted) ||
                        !has_iova_flush_queue(&domain->iovad)) {
                iommu_flush_iotlb_psi(iommu, domain, start_pfn,
@@ -3579,6 +3619,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
                 * cpu used up by the iotlb flush operation...
                 */
        }
+
+       trace_unmap_single(dev, dev_addr, size);
 }
 
 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
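
The unmap path above flushes the IOTLB immediately for strict mode, untrusted devices, or domains without an IOVA flush queue, and otherwise defers the flush, trading delayed IOVA reuse for the per-unmap flush cost noted in the comment. A minimal standalone sketch of that decision (the predicate name is an illustrative stand-in, not a kernel helper):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the condition in intel_unmap(): flush synchronously for strict
 * mode, untrusted devices, or when no IOVA flush queue exists; otherwise
 * the IOVAs go on the flush queue and are reclaimed later. */
static bool must_flush_now(bool strict, bool untrusted, bool has_flush_queue)
{
        return strict || untrusted || !has_flush_queue;
}

int main(void)
{
        printf("strict mode:       %s\n",
               must_flush_now(true, false, true) ? "flush now" : "defer");
        printf("untrusted device:  %s\n",
               must_flush_now(false, true, true) ? "flush now" : "defer");
        printf("trusted, default:  %s\n",
               must_flush_now(false, false, true) ? "flush now" : "defer");
        return 0;
}
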
@@ -3669,6 +3711,8 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
        }
 
        intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
+
+       trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
 }
 
 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
@@ -3725,6 +3769,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
                return 0;
        }
 
+       trace_map_sg(dev, iova_pfn << PAGE_SHIFT,
+                    sg_phys(sglist), size << VTD_PAGE_SHIFT);
+
        return nelems;
 }
 
@@ -3740,6 +3787,252 @@ static const struct dma_map_ops intel_dma_ops = {
        .dma_supported = dma_direct_supported,
 };
 
+static void
+bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size,
+                  enum dma_data_direction dir, enum dma_sync_target target)
+{
+       struct dmar_domain *domain;
+       phys_addr_t tlb_addr;
+
+       domain = find_domain(dev);
+       if (WARN_ON(!domain))
+               return;
+
+       tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr);
+       if (is_swiotlb_buffer(tlb_addr))
+               swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
+}
+
+static dma_addr_t
+bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
+                 enum dma_data_direction dir, unsigned long attrs,
+                 u64 dma_mask)
+{
+       size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
+       struct dmar_domain *domain;
+       struct intel_iommu *iommu;
+       unsigned long iova_pfn;
+       unsigned long nrpages;
+       phys_addr_t tlb_addr;
+       int prot = 0;
+       int ret;
+
+       domain = find_domain(dev);
+       if (WARN_ON(dir == DMA_NONE || !domain))
+               return DMA_MAPPING_ERROR;
+
+       iommu = domain_get_iommu(domain);
+       if (WARN_ON(!iommu))
+               return DMA_MAPPING_ERROR;
+
+       nrpages = aligned_nrpages(0, size);
+       iova_pfn = intel_alloc_iova(dev, domain,
+                                   dma_to_mm_pfn(nrpages), dma_mask);
+       if (!iova_pfn)
+               return DMA_MAPPING_ERROR;
+
+       /*
+        * Check if DMAR supports zero-length reads on write only
+        * mappings..
+        */
+       if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
+                       !cap_zlr(iommu->cap))
+               prot |= DMA_PTE_READ;
+       if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+               prot |= DMA_PTE_WRITE;
+
+       /*
+        * If both the physical buffer start address and size are
+        * page aligned, we don't need to use a bounce page.
+        */
+       if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
+               tlb_addr = swiotlb_tbl_map_single(dev,
+                               __phys_to_dma(dev, io_tlb_start),
+                               paddr, size, aligned_size, dir, attrs);
+               if (tlb_addr == DMA_MAPPING_ERROR) {
+                       goto swiotlb_error;
+               } else {
+                       /* Cleanup the padding area. */
+                       void *padding_start = phys_to_virt(tlb_addr);
+                       size_t padding_size = aligned_size;
+
+                       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+                           (dir == DMA_TO_DEVICE ||
+                            dir == DMA_BIDIRECTIONAL)) {
+                               padding_start += size;
+                               padding_size -= size;
+                       }
+
+                       memset(padding_start, 0, padding_size);
+               }
+       } else {
+               tlb_addr = paddr;
+       }
+
+       ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
+                                tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot);
+       if (ret)
+               goto mapping_error;
+
+       trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
+
+       return (phys_addr_t)iova_pfn << PAGE_SHIFT;
+
+mapping_error:
+       if (is_swiotlb_buffer(tlb_addr))
+               swiotlb_tbl_unmap_single(dev, tlb_addr, size,
+                                        aligned_size, dir, attrs);
+swiotlb_error:
+       free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
+       dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n",
+               size, (unsigned long long)paddr, dir);
+
+       return DMA_MAPPING_ERROR;
+}
+
+static void
+bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+                   enum dma_data_direction dir, unsigned long attrs)
+{
+       size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
+       struct dmar_domain *domain;
+       phys_addr_t tlb_addr;
+
+       domain = find_domain(dev);
+       if (WARN_ON(!domain))
+               return;
+
+       tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr);
+       if (WARN_ON(!tlb_addr))
+               return;
+
+       intel_unmap(dev, dev_addr, size);
+       if (is_swiotlb_buffer(tlb_addr))
+               swiotlb_tbl_unmap_single(dev, tlb_addr, size,
+                                        aligned_size, dir, attrs);
+
+       trace_bounce_unmap_single(dev, dev_addr, size);
+}
+
+static dma_addr_t
+bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+       return bounce_map_single(dev, page_to_phys(page) + offset,
+                                size, dir, attrs, *dev->dma_mask);
+}
+
+static dma_addr_t
+bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
+                   enum dma_data_direction dir, unsigned long attrs)
+{
+       return bounce_map_single(dev, phys_addr, size,
+                                dir, attrs, *dev->dma_mask);
+}
+
+static void
+bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
+                 enum dma_data_direction dir, unsigned long attrs)
+{
+       bounce_unmap_single(dev, dev_addr, size, dir, attrs);
+}
+
+static void
+bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size,
+                     enum dma_data_direction dir, unsigned long attrs)
+{
+       bounce_unmap_single(dev, dev_addr, size, dir, attrs);
+}
+
+static void
+bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems,
+               enum dma_data_direction dir, unsigned long attrs)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sglist, sg, nelems, i)
+               bounce_unmap_page(dev, sg->dma_address,
+                                 sg_dma_len(sg), dir, attrs);
+}
+
+static int
+bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
+             enum dma_data_direction dir, unsigned long attrs)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sglist, sg, nelems, i) {
+               sg->dma_address = bounce_map_page(dev, sg_page(sg),
+                                                 sg->offset, sg->length,
+                                                 dir, attrs);
+               if (sg->dma_address == DMA_MAPPING_ERROR)
+                       goto out_unmap;
+               sg_dma_len(sg) = sg->length;
+       }
+
+       return nelems;
+
+out_unmap:
+       bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+       return 0;
+}
+
+static void
+bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+                          size_t size, enum dma_data_direction dir)
+{
+       bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
+}
+
+static void
+bounce_sync_single_for_device(struct device *dev, dma_addr_t addr,
+                             size_t size, enum dma_data_direction dir)
+{
+       bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
+}
+
+static void
+bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
+                      int nelems, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sglist, sg, nelems, i)
+               bounce_sync_single(dev, sg_dma_address(sg),
+                                  sg_dma_len(sg), dir, SYNC_FOR_CPU);
+}
+
+static void
+bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+                         int nelems, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sglist, sg, nelems, i)
+               bounce_sync_single(dev, sg_dma_address(sg),
+                                  sg_dma_len(sg), dir, SYNC_FOR_DEVICE);
+}
+
+static const struct dma_map_ops bounce_dma_ops = {
+       .alloc                  = intel_alloc_coherent,
+       .free                   = intel_free_coherent,
+       .map_sg                 = bounce_map_sg,
+       .unmap_sg               = bounce_unmap_sg,
+       .map_page               = bounce_map_page,
+       .unmap_page             = bounce_unmap_page,
+       .sync_single_for_cpu    = bounce_sync_single_for_cpu,
+       .sync_single_for_device = bounce_sync_single_for_device,
+       .sync_sg_for_cpu        = bounce_sync_sg_for_cpu,
+       .sync_sg_for_device     = bounce_sync_sg_for_device,
+       .map_resource           = bounce_map_resource,
+       .unmap_resource         = bounce_unmap_resource,
+       .dma_supported          = dma_direct_supported,
+};
+
 static inline int iommu_domain_cache_init(void)
 {
        int ret = 0;
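
bounce_map_single() above only goes through swiotlb when the buffer's start address or size is not VTD_PAGE_SIZE aligned, and for device-bound data it zeroes the tail of the bounce slot so an untrusted device cannot read stale memory past the real payload. A standalone sketch of just that alignment decision and padding arithmetic (the swiotlb copy itself is omitted; ALIGN()/IS_ALIGNED() are re-defined locally to match their kernel semantics, and the physical address is made up):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SIZE    4096UL
/* Local copies of the kernel helpers so this builds standalone. */
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
        uint64_t paddr = 0x1000a00;     /* made-up, not page aligned */
        size_t size = 512;
        size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);

        if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
                /* Bounce: map aligned_size bytes, zero the tail padding
                 * that follows the copied data. */
                printf("bounce %zu bytes, zero %zu bytes of padding\n",
                       size, aligned_size - size);
        } else {
                /* Start and size already page aligned: map in place. */
                printf("map %zu bytes in place, no bounce page\n", size);
        }
        return 0;
}
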
@@ -4540,22 +4833,20 @@ const struct attribute_group *intel_iommu_groups[] = {
        NULL,
 };
 
-static int __init platform_optin_force_iommu(void)
+static inline bool has_untrusted_dev(void)
 {
        struct pci_dev *pdev = NULL;
-       bool has_untrusted_dev = false;
 
-       if (!dmar_platform_optin() || no_platform_optin)
-               return 0;
+       for_each_pci_dev(pdev)
+               if (pdev->untrusted)
+                       return true;
 
-       for_each_pci_dev(pdev) {
-               if (pdev->untrusted) {
-                       has_untrusted_dev = true;
-                       break;
-               }
-       }
+       return false;
+}
 
-       if (!has_untrusted_dev)
+static int __init platform_optin_force_iommu(void)
+{
+       if (!dmar_platform_optin() || no_platform_optin || !has_untrusted_dev())
                return 0;
 
        if (no_iommu || dmar_disabled)
@@ -4569,9 +4860,6 @@ static int __init platform_optin_force_iommu(void)
                iommu_identity_mapping |= IDENTMAP_ALL;
 
        dmar_disabled = 0;
-#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
-       swiotlb = 0;
-#endif
        no_iommu = 0;
 
        return 1;
@@ -4711,7 +4999,14 @@ int __init intel_iommu_init(void)
        up_write(&dmar_global_lock);
 
 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
-       swiotlb = 0;
+       /*
+        * If the system has no untrusted device or the user has decided
+        * to disable the bounce page mechanisms, we don't need swiotlb.
+        * Mark this and the pre-allocated bounce pages will be released
+        * later.
+        */
+       if (!has_untrusted_dev() || intel_no_bounce)
+               swiotlb = 0;
 #endif
        dma_ops = &intel_dma_ops;
 
@@ -4759,6 +5054,28 @@ out_free_dmar:
        return ret;
 }
 
+static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+{
+       struct intel_iommu *iommu = opaque;
+
+       domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+       return 0;
+}
+
+/*
+ * NB - intel-iommu lacks any sort of reference counting for the users of
+ * dependent devices.  If multiple endpoints have intersecting dependent
+ * devices, unbinding the driver from any one of them will possibly leave
+ * the others unable to operate.
+ */
+static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
+{
+       if (!iommu || !dev || !dev_is_pci(dev))
+               return;
+
+       pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
+}
+
 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 {
        struct dmar_domain *domain;
@@ -4779,7 +5096,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
                                        PASID_RID2PASID);
 
                iommu_disable_dev_iotlb(info);
-               domain_context_clear_one(iommu, info->bus, info->devfn);
+               domain_context_clear(iommu, info->dev);
                intel_pasid_free_table(info->dev);
        }
 
@@ -5153,7 +5470,8 @@ static int intel_iommu_map(struct iommu_domain *domain,
 }
 
 static size_t intel_iommu_unmap(struct iommu_domain *domain,
-                               unsigned long iova, size_t size)
+                               unsigned long iova, size_t size,
+                               struct iommu_iotlb_gather *gather)
 {
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct page *freelist = NULL;
@@ -5309,6 +5627,11 @@ static int intel_iommu_add_device(struct device *dev)
                }
        }
 
+       if (device_needs_bounce(dev)) {
+               dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n");
+               set_dma_ops(dev, &bounce_dma_ops);
+       }
+
        return 0;
 }
 
@@ -5326,6 +5649,9 @@ static void intel_iommu_remove_device(struct device *dev)
        iommu_group_remove_device(dev);
 
        iommu_device_unlink(&iommu->iommu, dev);
+
+       if (device_needs_bounce(dev))
+               set_dma_ops(dev, NULL);
 }
 
 static void intel_iommu_get_resv_regions(struct device *device,
@@ -5639,20 +5965,46 @@ const struct iommu_ops intel_iommu_ops = {
        .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
 };
 
-static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
+static void quirk_iommu_igfx(struct pci_dev *dev)
 {
-       /* G4x/GM45 integrated gfx dmar support is totally busted. */
        pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
        dmar_map_gfx = 0;
 }
 
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
+/* G4x/GM45 integrated gfx dmar support is totally busted. */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
+
+/* Broadwell igfx malfunctions with dmar */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
 
 static void quirk_iommu_rwbf(struct pci_dev *dev)
 {