Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
author     David Woodhouse <David.Woodhouse@intel.com>
           Tue, 21 Oct 2008 18:42:20 +0000 (19:42 +0100)
committer  David Woodhouse <David.Woodhouse@intel.com>
           Tue, 21 Oct 2008 18:42:20 +0000 (19:42 +0100)
Conflicts:

drivers/pci/dmar.c

MAINTAINERS
drivers/pci/dmar.c
drivers/pci/quirks.c

diff --combined MAINTAINERS
index 22303e5fe4ce41986bc14a6b2a68cc20b418f845,5c3f79c26384eba384af07241c71f2cb251abdb5..6d51f00dcdc0ba8349469d04005bb8435463f8e2
@@@ -1198,7 -1198,7 +1198,7 @@@ S:      Maintaine
  
  CPU FREQUENCY DRIVERS
  P:    Dave Jones
- M:    davej@codemonkey.org.uk
+ M:    davej@redhat.com
  L:    cpufreq@vger.kernel.org
  W:    http://www.codemonkey.org.uk/projects/cpufreq/
  T:    git kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
@@@ -2176,13 -2176,6 +2176,13 @@@ M:    maciej.sosnowski@intel.co
  L:    linux-kernel@vger.kernel.org
  S:    Supported
  
 +INTEL IOMMU (VT-d)
 +P:    David Woodhouse
 +M:    dwmw2@infradead.org
 +L:    iommu@lists.linux-foundation.org
 +T:    git://git.infradead.org/iommu-2.6.git
 +S:    Supported
 +
  INTEL IOP-ADMA DMA DRIVER
  P:    Dan Williams
  M:    dan.j.williams@intel.com
diff --combined drivers/pci/dmar.c
index 7b3751136e63734054d3f53a84ec52f256cb463c,8b29c307f1a11c0b94fe4e3214f6b62faf9c5063..691b3adeb87057799841f112d8b77797e729e4ce
@@@ -188,7 -188,8 +188,7 @@@ dmar_parse_one_drhd(struct acpi_dmar_he
        return 0;
  }
  
 -static int __init
 -dmar_parse_dev(struct dmar_drhd_unit *dmaru)
 +static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
  {
        struct acpi_dmar_hardware_unit *drhd;
        static int include_all;
                include_all = 1;
        }
  
-       if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) {
+       if (ret) {
                list_del(&dmaru->list);
                kfree(dmaru);
        }
@@@ -276,19 -277,36 +276,37 @@@ dmar_table_print_dmar_entry(struct acpi
                drhd = (struct acpi_dmar_hardware_unit *)header;
                printk (KERN_INFO PREFIX
                        "DRHD (flags: 0x%08x)base: 0x%016Lx\n",
 -                      drhd->flags, drhd->address);
 +                      drhd->flags, (unsigned long long)drhd->address);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = (struct acpi_dmar_reserved_memory *)header;
  
                printk (KERN_INFO PREFIX
                        "RMRR base: 0x%016Lx end: 0x%016Lx\n",
 -                      rmrr->base_address, rmrr->end_address);
 +                      (unsigned long long)rmrr->base_address,
 +                      (unsigned long long)rmrr->end_address);
                break;
        }
  }
  
+ /**
+  * dmar_table_detect - checks to see if the platform supports DMAR devices
+  */
+ static int __init dmar_table_detect(void)
+ {
+       acpi_status status = AE_OK;
+       /* if we can find the DMAR table, then there are DMAR devices */
+       status = acpi_get_table(ACPI_SIG_DMAR, 0,
+                               (struct acpi_table_header **)&dmar_tbl);
+       if (ACPI_SUCCESS(status) && !dmar_tbl) {
+               printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
+               status = AE_NOT_FOUND;
+       }
+       return (ACPI_SUCCESS(status) ? 1 : 0);
+ }
  
  /**
   * parse_dmar_table - parses the DMA reporting table
@@@ -300,11 -318,17 +318,17 @@@ parse_dmar_table(void
        struct acpi_dmar_header *entry_header;
        int ret = 0;
  
+       /*
+        * Do it again; the earlier dmar_tbl mapping may have come from
+        * the early fixed map.
+        */
+       dmar_table_detect();
        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;
  
 -      if (dmar->width < PAGE_SHIFT_4K - 1) {
 +      if (dmar->width < PAGE_SHIFT - 1) {
                printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
                return -EINVAL;
        }
@@@ -373,10 -397,10 +397,10 @@@ dmar_find_matched_drhd_unit(struct pci_
  
  int __init dmar_dev_scope_init(void)
  {
-       struct dmar_drhd_unit *drhd;
+       struct dmar_drhd_unit *drhd, *drhd_n;
        int ret = -ENODEV;
  
-       for_each_drhd_unit(drhd) {
+       list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
                ret = dmar_parse_dev(drhd);
                if (ret)
                        return ret;
  
  #ifdef CONFIG_DMAR
        {
-               struct dmar_rmrr_unit *rmrr;
-               for_each_rmrr_units(rmrr) {
+               struct dmar_rmrr_unit *rmrr, *rmrr_n;
+               list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
                        ret = rmrr_parse_dev(rmrr);
                        if (ret)
                                return ret;
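
The switch to list_for_each_entry_safe() above is needed because the
parse helpers can list_del() and kfree() the entry they are handed (see
the first dmar.c hunk), so the plain for_each_drhd_unit() /
for_each_rmrr_units() walkers would advance through freed memory. A
minimal standalone sketch of the pattern; 'struct foo' and drop_all()
are hypothetical, not taken from this diff:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct foo {
		struct list_head list;
	};

	static LIST_HEAD(foo_list);

	static void drop_all(void)
	{
		struct foo *f, *tmp;

		/* _safe caches the next node in 'tmp' before the body
		 * runs, so freeing the current node is fine. */
		list_for_each_entry_safe(f, tmp, &foo_list, list) {
			list_del(&f->list);
			kfree(f);
		}
	}
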
@@@ -430,33 -454,14 +454,14 @@@ int __init dmar_table_init(void
        return 0;
  }
  
- /**
-  * early_dmar_detect - checks to see if the platform supports DMAR devices
-  */
- int __init early_dmar_detect(void)
- {
-       acpi_status status = AE_OK;
-       /* if we could find DMAR table, then there are DMAR devices */
-       status = acpi_get_table(ACPI_SIG_DMAR, 0,
-                               (struct acpi_table_header **)&dmar_tbl);
-       if (ACPI_SUCCESS(status) && !dmar_tbl) {
-               printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
-               status = AE_NOT_FOUND;
-       }
-       return (ACPI_SUCCESS(status) ? 1 : 0);
- }
  void __init detect_intel_iommu(void)
  {
        int ret;
  
-       ret = early_dmar_detect();
+       ret = dmar_table_detect();
  
 -#ifdef CONFIG_DMAR
        {
 +#ifdef CONFIG_INTR_REMAP
                struct acpi_table_dmar *dmar;
                /*
                 * for now we will disable dma-remapping when interrupt
                 * is added, we will not need this any more.
                 */
                dmar = (struct acpi_table_dmar *) dmar_tbl;
 -              if (ret && cpu_has_x2apic && dmar->flags & 0x1) {
 +              if (ret && cpu_has_x2apic && dmar->flags & 0x1)
                        printk(KERN_INFO
                               "Queued invalidation will be enabled to support "
                               "x2apic and Intr-remapping.\n");
 -                      printk(KERN_INFO
 -                             "Disabling IOMMU detection, because of missing "
 -                             "queued invalidation support for IOTLB "
 -                             "invalidation\n");
 -                      printk(KERN_INFO
 -                             "Use \"nox2apic\", if you want to use Intel "
 -                             " IOMMU for DMA-remapping and don't care about "
 -                             " x2apic support\n");
 -
 -                      dmar_disabled = 1;
 -                      goto end;
 -              }
 -
 +#endif
 +#ifdef CONFIG_DMAR
                if (ret && !no_iommu && !iommu_detected && !swiotlb &&
                    !dmar_disabled)
                        iommu_detected = 1;
 -      }
 -end:
  #endif
 +      }
+       dmar_tbl = NULL;
  }
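
Setting dmar_tbl = NULL here pairs with the dmar_table_detect() call
added in parse_dmar_table(): the early detection runs before the
permanent ACPI mappings exist, so the pointer from that first mapping
must not be kept. A rough call-order sketch, assumed from this diff
rather than verified against the surrounding boot code:

	/*
	 * detect_intel_iommu()          early boot
	 *   dmar_table_detect()         maps DMAR via the early ACPI mapping
	 *   dmar_tbl = NULL             drop the early pointer
	 * ...
	 * dmar_table_init()
	 *   parse_dmar_table()
	 *     dmar_table_detect()       re-map via the permanent mapping
	 */
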
  
  
@@@ -493,7 -510,7 +498,7 @@@ int alloc_iommu(struct dmar_drhd_unit *
  
        iommu->seq_id = iommu_allocated++;
  
 -      iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
 +      iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
        if (!iommu->reg) {
                printk(KERN_ERR "IOMMU: can't map the region\n");
                goto error;
        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                cap_max_fault_reg_offset(iommu->cap));
 -      map_size = PAGE_ALIGN_4K(map_size);
 -      if (map_size > PAGE_SIZE_4K) {
 +      map_size = VTD_PAGE_ALIGN(map_size);
 +      if (map_size > VTD_PAGE_SIZE) {
                iounmap(iommu->reg);
                iommu->reg = ioremap(drhd->reg_base_addr, map_size);
                if (!iommu->reg) {
  
        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
 -              drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
 -              iommu->cap, iommu->ecap);
 +              (unsigned long long)drhd->reg_base_addr,
 +              DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
 +              (unsigned long long)iommu->cap,
 +              (unsigned long long)iommu->ecap);
  
        spin_lock_init(&iommu->register_lock);
  
@@@ -572,11 -587,11 +577,11 @@@ void qi_submit_sync(struct qi_desc *des
  
        hw = qi->desc;
  
 -      spin_lock(&qi->q_lock);
 +      spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
 -              spin_unlock(&qi->q_lock);
 +              spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
 -              spin_lock(&qi->q_lock);
 +              spin_lock_irqsave(&qi->q_lock, flags);
        }
  
        index = qi->free_head;
        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;
  
 -      spin_lock_irqsave(&iommu->register_lock, flags);
 +      spin_lock(&iommu->register_lock);
        /*
         * update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
 -      spin_unlock_irqrestore(&iommu->register_lock, flags);
 +      spin_unlock(&iommu->register_lock);
  
        while (qi->desc_status[wait_index] != QI_DONE) {
 +              /*
 +               * Leave interrupts disabled to prevent interrupt context
 +               * from queueing another cmd while one is already submitted
 +               * and waiting for completion on this cpu.  Otherwise the
 +               * interrupt context could wait indefinitely for free slots
 +               * in the queue, deadlocking.
 +               */
                spin_unlock(&qi->q_lock);
                cpu_relax();
                spin_lock(&qi->q_lock);
        qi->desc_status[index] = QI_DONE;
  
        reclaim_free_desc(qi);
 -      spin_unlock(&qi->q_lock);
 +      spin_unlock_irqrestore(&qi->q_lock, flags);
  }
  
  /*
@@@ -637,62 -645,6 +642,62 @@@ void qi_global_iec(struct intel_iommu *
        qi_submit_sync(&desc, iommu);
  }
  
 +int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 +                   u64 type, int non_present_entry_flush)
 +{
 +      struct qi_desc desc;
 +
 +      if (non_present_entry_flush) {
 +              if (!cap_caching_mode(iommu->cap))
 +                      return 1;
 +              else
 +                      did = 0;
 +      }
 +
 +      desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
 +                      | QI_CC_GRAN(type) | QI_CC_TYPE;
 +      desc.high = 0;
 +
 +      qi_submit_sync(&desc, iommu);
 +
 +      return 0;
 +}
 +
 +int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 +                 unsigned int size_order, u64 type,
 +                 int non_present_entry_flush)
 +{
 +      u8 dw = 0, dr = 0;
 +      struct qi_desc desc;
 +      int ih = 0;
 +
 +      if (non_present_entry_flush) {
 +              if (!cap_caching_mode(iommu->cap))
 +                      return 1;
 +              else
 +                      did = 0;
 +      }
 +
 +      if (cap_write_drain(iommu->cap))
 +              dw = 1;
 +
 +      if (cap_read_drain(iommu->cap))
 +              dr = 1;
 +
 +      desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
 +              | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
 +      desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
 +              | QI_IOTLB_AM(size_order);
 +
 +      qi_submit_sync(&desc, iommu);
 +
 +      return 0;
 +}
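
A hedged usage sketch for the two helpers added above. The granularity
constants are assumed to be the DMA_CCMD_*/DMA_TLB_* values from
include/linux/intel-iommu.h of this period; the wrapper function itself
is hypothetical:

	static void example_flush_domain(struct intel_iommu *iommu, u16 did)
	{
		/* domain-selective context-cache flush, then the matching
		 * IOTLB flush; non_present_entry_flush = 0 flushes
		 * unconditionally */
		qi_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL, 0);
		qi_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, 0);
	}
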
 +
  /*
   * Enable Queued Invalidation interface. This is a must to support
   * interrupt-remapping. Also used by DMA-remapping, which replaces
diff --combined drivers/pci/quirks.c
index 832175d9ca2505e7daa4b4bef302cff1cb239efa,bbf66ea8fd87b4cbe11029dd8ac2aa826ea1fedc..96cf8ecd04ce7b40ea60fc9e1a077607a8c46d77
  #include <linux/kallsyms.h>
  #include "pci.h"
  
+ int isa_dma_bridge_buggy;
+ EXPORT_SYMBOL(isa_dma_bridge_buggy);
+ int pci_pci_problems;
+ EXPORT_SYMBOL(pci_pci_problems);
+ int pcie_mch_quirk;
+ EXPORT_SYMBOL(pcie_mch_quirk);
+ #ifdef CONFIG_PCI_QUIRKS
  /* The Mellanox Tavor device gives false positive parity errors
   * Mark this device with a broken_parity_status, to allow
   * PCI scanning code to "skip" this now blacklisted device.
@@@ -35,20 -43,6 +43,20 @@@ static void __devinit quirk_mellanox_ta
  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
  
 +/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
 +int forbid_dac __read_mostly;
 +EXPORT_SYMBOL(forbid_dac);
 +
 +static __devinit void via_no_dac(struct pci_dev *dev)
 +{
 +      if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
 +              dev_info(&dev->dev,
 +                      "VIA PCI bridge detected. Disabling DAC.\n");
 +              forbid_dac = 1;
 +      }
 +}
 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
 +
  /* Deal with broken BIOS'es that neglect to enable passive release,
     which can cause problems in combination with the 82441FX/PPro MTRRs */
  static void quirk_passive_release(struct pci_dev *dev)
@@@ -76,8 -70,6 +84,6 @@@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_
      
      This appears to be BIOS not version dependent. So presumably there is a 
      chipset level fix */
- int isa_dma_bridge_buggy;
- EXPORT_SYMBOL(isa_dma_bridge_buggy);
      
  static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev)
  {
@@@ -98,9 -90,6 +104,6 @@@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_N
  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,    PCI_DEVICE_ID_NEC_CBUS_2,       quirk_isa_dma_hangs);
  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,    PCI_DEVICE_ID_NEC_CBUS_3,       quirk_isa_dma_hangs);
  
- int pci_pci_problems;
- EXPORT_SYMBOL(pci_pci_problems);
  /*
   *    Chipsets where PCI->PCI transfers vanish or hang
   */
@@@ -1376,9 -1365,6 +1379,6 @@@ static void __init quirk_alder_ioapic(s
  DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC,      quirk_alder_ioapic);
  #endif
  
- int pcie_mch_quirk;
- EXPORT_SYMBOL(pcie_mch_quirk);
  static void __devinit quirk_pcie_mch(struct pci_dev *pdev)
  {
        pcie_mch_quirk = 1;
@@@ -1569,84 -1555,6 +1569,6 @@@ static void __devinit fixup_rev1_53c810
  }
  DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
  
- static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)
- {
-       while (f < end) {
-               if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
-                   (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) {
- #ifdef DEBUG
-                       dev_dbg(&dev->dev, "calling %pF\n", f->hook);
- #endif
-                       f->hook(dev);
-               }
-               f++;
-       }
- }
- extern struct pci_fixup __start_pci_fixups_early[];
- extern struct pci_fixup __end_pci_fixups_early[];
- extern struct pci_fixup __start_pci_fixups_header[];
- extern struct pci_fixup __end_pci_fixups_header[];
- extern struct pci_fixup __start_pci_fixups_final[];
- extern struct pci_fixup __end_pci_fixups_final[];
- extern struct pci_fixup __start_pci_fixups_enable[];
- extern struct pci_fixup __end_pci_fixups_enable[];
- extern struct pci_fixup __start_pci_fixups_resume[];
- extern struct pci_fixup __end_pci_fixups_resume[];
- extern struct pci_fixup __start_pci_fixups_resume_early[];
- extern struct pci_fixup __end_pci_fixups_resume_early[];
- extern struct pci_fixup __start_pci_fixups_suspend[];
- extern struct pci_fixup __end_pci_fixups_suspend[];
- void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
- {
-       struct pci_fixup *start, *end;
-       switch(pass) {
-       case pci_fixup_early:
-               start = __start_pci_fixups_early;
-               end = __end_pci_fixups_early;
-               break;
-       case pci_fixup_header:
-               start = __start_pci_fixups_header;
-               end = __end_pci_fixups_header;
-               break;
-       case pci_fixup_final:
-               start = __start_pci_fixups_final;
-               end = __end_pci_fixups_final;
-               break;
-       case pci_fixup_enable:
-               start = __start_pci_fixups_enable;
-               end = __end_pci_fixups_enable;
-               break;
-       case pci_fixup_resume:
-               start = __start_pci_fixups_resume;
-               end = __end_pci_fixups_resume;
-               break;
-       case pci_fixup_resume_early:
-               start = __start_pci_fixups_resume_early;
-               end = __end_pci_fixups_resume_early;
-               break;
-       case pci_fixup_suspend:
-               start = __start_pci_fixups_suspend;
-               end = __end_pci_fixups_suspend;
-               break;
-       default:
-               /* stupid compiler warning, you would think with an enum... */
-               return;
-       }
-       pci_do_fixups(dev, start, end);
- }
- EXPORT_SYMBOL(pci_fixup_device);
  /* Enable 1k I/O space granularity on the Intel P64H2 */
  static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
  {
@@@ -2020,3 -1928,82 +1942,82 @@@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_A
                        quirk_msi_intx_disable_bug);
  
  #endif /* CONFIG_PCI_MSI */
+ static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)
+ {
+       while (f < end) {
+               if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
+                   (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) {
+                       dev_dbg(&dev->dev, "calling %pF\n", f->hook);
+                       f->hook(dev);
+               }
+               f++;
+       }
+ }
+ extern struct pci_fixup __start_pci_fixups_early[];
+ extern struct pci_fixup __end_pci_fixups_early[];
+ extern struct pci_fixup __start_pci_fixups_header[];
+ extern struct pci_fixup __end_pci_fixups_header[];
+ extern struct pci_fixup __start_pci_fixups_final[];
+ extern struct pci_fixup __end_pci_fixups_final[];
+ extern struct pci_fixup __start_pci_fixups_enable[];
+ extern struct pci_fixup __end_pci_fixups_enable[];
+ extern struct pci_fixup __start_pci_fixups_resume[];
+ extern struct pci_fixup __end_pci_fixups_resume[];
+ extern struct pci_fixup __start_pci_fixups_resume_early[];
+ extern struct pci_fixup __end_pci_fixups_resume_early[];
+ extern struct pci_fixup __start_pci_fixups_suspend[];
+ extern struct pci_fixup __end_pci_fixups_suspend[];
+ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
+ {
+       struct pci_fixup *start, *end;
+       switch(pass) {
+       case pci_fixup_early:
+               start = __start_pci_fixups_early;
+               end = __end_pci_fixups_early;
+               break;
+       case pci_fixup_header:
+               start = __start_pci_fixups_header;
+               end = __end_pci_fixups_header;
+               break;
+       case pci_fixup_final:
+               start = __start_pci_fixups_final;
+               end = __end_pci_fixups_final;
+               break;
+       case pci_fixup_enable:
+               start = __start_pci_fixups_enable;
+               end = __end_pci_fixups_enable;
+               break;
+       case pci_fixup_resume:
+               start = __start_pci_fixups_resume;
+               end = __end_pci_fixups_resume;
+               break;
+       case pci_fixup_resume_early:
+               start = __start_pci_fixups_resume_early;
+               end = __end_pci_fixups_resume_early;
+               break;
+       case pci_fixup_suspend:
+               start = __start_pci_fixups_suspend;
+               end = __end_pci_fixups_suspend;
+               break;
+       default:
+               /* stupid compiler warning, you would think with an enum... */
+               return;
+       }
+       pci_do_fixups(dev, start, end);
+ }
+ #else
+ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
+ #endif
+ EXPORT_SYMBOL(pci_fixup_device);
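
The __start/__end symbols walked by pci_fixup_device() are populated by
the DECLARE_PCI_FIXUP_*() macros used throughout this file, which place
each struct pci_fixup into a named ELF section that the linker script
delimits. A paraphrase of the declaration side -- an approximation of
the include/linux/pci.h macros of this era, not the verbatim text:

	#define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, hook) \
		static const struct pci_fixup __pci_fixup_##name __used    \
		__attribute__((__section__(#sec))) = { vendor, device, hook };

	#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook)              \
		DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,                \
			vendor##device##hook, vendor, device, hook)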