return 0;
}
-static int __init
-dmar_parse_dev(struct dmar_drhd_unit *dmaru)
+static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
struct acpi_dmar_hardware_unit *drhd;
static int include_all;
include_all = 1;
}
- if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) {
+ if (ret) {
list_del(&dmaru->list);
kfree(dmaru);
}
drhd = (struct acpi_dmar_hardware_unit *)header;
printk (KERN_INFO PREFIX
"DRHD (flags: 0x%08x)base: 0x%016Lx\n",
- drhd->flags, drhd->address);
+ drhd->flags, (unsigned long long)drhd->address);
break;
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
rmrr = (struct acpi_dmar_reserved_memory *)header;
printk (KERN_INFO PREFIX
"RMRR base: 0x%016Lx end: 0x%016Lx\n",
- rmrr->base_address, rmrr->end_address);
+ (unsigned long long)rmrr->base_address,
+ (unsigned long long)rmrr->end_address);
break;
}
}
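The (unsigned long long) casts added above are the standard portable fix for printing a u64 with %Lx/%llx: u64 is unsigned long on some 64-bit architectures and unsigned long long on others. A minimal illustration, with a made-up address value:

	u64 base = 0xfed90000ULL;

	/*
	 * Without the cast, builds where u64 == unsigned long warn that
	 * '%llx' expects 'long long unsigned int'; where the types already
	 * agree, the cast is a no-op.
	 */
	printk(KERN_INFO "base: 0x%016Lx\n", (unsigned long long)base);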
+ /**
+ * dmar_table_detect - checks to see if the platform supports DMAR devices
+ */
+ static int __init dmar_table_detect(void)
+ {
+ acpi_status status = AE_OK;
+
+ /* if we could find DMAR table, then there are DMAR devices */
+ status = acpi_get_table(ACPI_SIG_DMAR, 0,
+ (struct acpi_table_header **)&dmar_tbl);
+
+ if (ACPI_SUCCESS(status) && !dmar_tbl) {
+ printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
+ status = AE_NOT_FOUND;
+ }
+
+ return (ACPI_SUCCESS(status) ? 1 : 0);
+ }
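dmar_table_detect() treats the table as present only when acpi_get_table() both returns success and actually hands back a mapping. The same probe pattern works for any ACPI table signature; a hedged sketch, with an illustrative helper name that is not part of this patch:

	static int __init acpi_table_present(char *sig)
	{
		struct acpi_table_header *tbl = NULL;
		acpi_status status = acpi_get_table(sig, 0, &tbl);

		/* present only if the lookup succeeded AND the table mapped */
		return ACPI_SUCCESS(status) && tbl;
	}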
/**
* parse_dmar_table - parses the DMA reporting table
struct acpi_dmar_header *entry_header;
int ret = 0;
+ /*
+ * Do it again, as the earlier dmar_tbl mapping could have been
+ * done with the fixed map.
+ */
+ dmar_table_detect();
+
dmar = (struct acpi_table_dmar *)dmar_tbl;
if (!dmar)
return -ENODEV;
- if (dmar->width < PAGE_SHIFT_4K - 1) {
+ if (dmar->width < PAGE_SHIFT - 1) {
printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
return -EINVAL;
}
int __init dmar_dev_scope_init(void)
{
- struct dmar_drhd_unit *drhd;
+ struct dmar_drhd_unit *drhd, *drhd_n;
int ret = -ENODEV;
- for_each_drhd_unit(drhd) {
+ list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
ret = dmar_parse_dev(drhd);
if (ret)
return ret;
#ifdef CONFIG_DMAR
{
- struct dmar_rmrr_unit *rmrr;
- for_each_rmrr_units(rmrr) {
+ struct dmar_rmrr_unit *rmrr, *rmrr_n;
+ list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
ret = rmrr_parse_dev(rmrr);
if (ret)
return ret;
return 0;
}
- /**
- * early_dmar_detect - checks to see if the platform supports DMAR devices
- */
- int __init early_dmar_detect(void)
- {
- acpi_status status = AE_OK;
-
- /* if we could find DMAR table, then there are DMAR devices */
- status = acpi_get_table(ACPI_SIG_DMAR, 0,
- (struct acpi_table_header **)&dmar_tbl);
-
- if (ACPI_SUCCESS(status) && !dmar_tbl) {
- printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
- status = AE_NOT_FOUND;
- }
-
- return (ACPI_SUCCESS(status) ? 1 : 0);
- }
-
void __init detect_intel_iommu(void)
{
int ret;
- ret = early_dmar_detect();
+ ret = dmar_table_detect();
-#ifdef CONFIG_DMAR
{
+#ifdef CONFIG_INTR_REMAP
struct acpi_table_dmar *dmar;
/*
 * for now we will disable dma-remapping when interrupt
 * remapping is enabled.
 * When support for queued invalidation for IOTLB invalidation
 * is added, we will not need this any more.
 */
dmar = (struct acpi_table_dmar *) dmar_tbl;
- if (ret && cpu_has_x2apic && dmar->flags & 0x1) {
+ if (ret && cpu_has_x2apic && dmar->flags & 0x1)
printk(KERN_INFO
"Queued invalidation will be enabled to support "
"x2apic and Intr-remapping.\n");
- printk(KERN_INFO
- "Disabling IOMMU detection, because of missing "
- "queued invalidation support for IOTLB "
- "invalidation\n");
- printk(KERN_INFO
- "Use \"nox2apic\", if you want to use Intel "
- " IOMMU for DMA-remapping and don't care about "
- " x2apic support\n");
-
- dmar_disabled = 1;
- goto end;
- }
-
+#endif
+#ifdef CONFIG_DMAR
if (ret && !no_iommu && !iommu_detected && !swiotlb &&
!dmar_disabled)
iommu_detected = 1;
- }
-end:
#endif
+ }
+ dmar_tbl = NULL;
}
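The dmar->flags & 0x1 test above reads bit 0 of the DMAR table's flags field, which the VT-d specification defines as the interrupt-remapping capability. A sketch with a named constant, purely illustrative (neither the macro nor the helper is part of this patch):

	#define DMAR_FLAGS_INTR_REMAP	(1 << 0)	/* bit 0 of DMAR flags */

	static inline int dmar_has_intr_remap(struct acpi_table_dmar *dmar)
	{
		return dmar && (dmar->flags & DMAR_FLAGS_INTR_REMAP);
	}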
iommu->seq_id = iommu_allocated++;
- iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
+ iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
if (!iommu->reg) {
printk(KERN_ERR "IOMMU: can't map the region\n");
goto error;
/* the registers might be more than one page */
map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
cap_max_fault_reg_offset(iommu->cap));
- map_size = PAGE_ALIGN_4K(map_size);
- if (map_size > PAGE_SIZE_4K) {
+ map_size = VTD_PAGE_ALIGN(map_size);
+ if (map_size > VTD_PAGE_SIZE) {
iounmap(iommu->reg);
iommu->reg = ioremap(drhd->reg_base_addr, map_size);
if (!iommu->reg) {
ver = readl(iommu->reg + DMAR_VER_REG);
pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
- drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
- iommu->cap, iommu->ecap);
+ (unsigned long long)drhd->reg_base_addr,
+ DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
+ (unsigned long long)iommu->cap,
+ (unsigned long long)iommu->ecap);
spin_lock_init(&iommu->register_lock);
hw = qi->desc;
- spin_lock(&qi->q_lock);
+ spin_lock_irqsave(&qi->q_lock, flags);
while (qi->free_cnt < 3) {
- spin_unlock(&qi->q_lock);
+ spin_unlock_irqrestore(&qi->q_lock, flags);
cpu_relax();
- spin_lock(&qi->q_lock);
+ spin_lock_irqsave(&qi->q_lock, flags);
}
index = qi->free_head;
qi->free_head = (qi->free_head + 2) % QI_LENGTH;
qi->free_cnt -= 2;
- spin_lock_irqsave(&iommu->register_lock, flags);
+ spin_lock(&iommu->register_lock);
/*
* update the HW tail register indicating the presence of
* new descriptors.
*/
writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ spin_unlock(&iommu->register_lock);
while (qi->desc_status[wait_index] != QI_DONE) {
+ /*
+ * We will leave the interrupts disabled, to prevent the interrupt
+ * context from queueing another cmd while a cmd is already submitted
+ * and waiting for completion on this cpu. This is to avoid a
+ * deadlock where the interrupt context could wait indefinitely for
+ * free slots in the queue.
+ */
spin_unlock(&qi->q_lock);
cpu_relax();
spin_lock(&qi->q_lock);
qi->desc_status[index] = QI_DONE;
reclaim_free_desc(qi);
- spin_unlock(&qi->q_lock);
+ spin_unlock_irqrestore(&qi->q_lock, flags);
}
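Taken together, the locking changes in qi_submit_sync() move the irqsave to the outer q_lock, so the nested register_lock can drop its own irqsave: interrupts are already off while it is held. A simplified sketch of the resulting nesting (not the full function body):

	unsigned long flags;

	spin_lock_irqsave(&qi->q_lock, flags);	/* outer lock disables irqs */
	/* ... reserve queue slots and fill in the descriptor ... */
	spin_lock(&iommu->register_lock);	/* inner: irqs already off */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
	spin_unlock(&iommu->register_lock);
	/* ... poll for QI_DONE, dropping only q_lock while spinning ... */
	spin_unlock_irqrestore(&qi->q_lock, flags);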
/*
qi_submit_sync(&desc, iommu);
}
+int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+ u64 type, int non_present_entry_flush)
+{
+ struct qi_desc desc;
+
+ if (non_present_entry_flush) {
+ if (!cap_caching_mode(iommu->cap))
+ return 1;
+ else
+ did = 0;
+ }
+
+ desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
+ | QI_CC_GRAN(type) | QI_CC_TYPE;
+ desc.high = 0;
+
+ qi_submit_sync(&desc, iommu);
+
+ return 0;
+}
+
+int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type,
+ int non_present_entry_flush)
+{
+ u8 dw = 0, dr = 0;
+ struct qi_desc desc;
+ int ih = 0;
+
+ if (non_present_entry_flush) {
+ if (!cap_caching_mode(iommu->cap))
+ return 1;
+ else
+ did = 0;
+ }
+
+ if (cap_write_drain(iommu->cap))
+ dw = 1;
+
+ if (cap_read_drain(iommu->cap))
+ dr = 1;
+
+ desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
+ | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
+ desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
+ | QI_IOTLB_AM(size_order);
+
+ qi_submit_sync(&desc, iommu);
+
+ return 0;
+}
+
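Both helpers assemble a single 128-bit invalidation descriptor in desc.low/desc.high and push it through qi_submit_sync(), which blocks until the hardware reports completion. A hedged usage sketch only (the wrapper and the domain id are made up; the DMA_CCMD_*/DMA_TLB_* granularity constants come from intel-iommu.h):

	/* Illustrative: synchronously flush one domain's cached translations. */
	static void example_flush_domain(struct intel_iommu *iommu, u16 did)
	{
		/* domain-selective context-cache invalidation ... */
		qi_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL, 0);
		/* ... then drop the IOTLB entries tagged with that domain */
		qi_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, 0);
	}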
/*
* Enable Queued Invalidation interface. This is a must to support
* interrupt-remapping. Also used by DMA-remapping, which replaces
#include <linux/kallsyms.h>
#include "pci.h"
+ int isa_dma_bridge_buggy;
+ EXPORT_SYMBOL(isa_dma_bridge_buggy);
+ int pci_pci_problems;
+ EXPORT_SYMBOL(pci_pci_problems);
+ int pcie_mch_quirk;
+ EXPORT_SYMBOL(pcie_mch_quirk);
+
+ #ifdef CONFIG_PCI_QUIRKS
/* The Mellanox Tavor device gives false positive parity errors
* Mark this device with a broken_parity_status, to allow
* PCI scanning code to "skip" this now blacklisted device.
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
+/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
+int forbid_dac __read_mostly;
+EXPORT_SYMBOL(forbid_dac);
+
+static __devinit void via_no_dac(struct pci_dev *dev)
+{
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
+ dev_info(&dev->dev,
+ "VIA PCI bridge detected. Disabling DAC.\n");
+ forbid_dac = 1;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
+
/* Deal with broken BIOS'es that neglect to enable passive release,
which can cause problems in combination with the 82441FX/PPro MTRRs */
static void quirk_passive_release(struct pci_dev *dev)
This appears to be BIOS not version dependent. So presumably there is a
chipset level fix */
- int isa_dma_bridge_buggy;
- EXPORT_SYMBOL(isa_dma_bridge_buggy);
static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev)
{
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
- int pci_pci_problems;
- EXPORT_SYMBOL(pci_pci_problems);
-
/*
* Chipsets where PCI->PCI transfers vanish or hang
*/
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
#endif
- int pcie_mch_quirk;
- EXPORT_SYMBOL(pcie_mch_quirk);
-
static void __devinit quirk_pcie_mch(struct pci_dev *pdev)
{
pcie_mch_quirk = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
- static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)
- {
- while (f < end) {
- if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
- (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) {
- #ifdef DEBUG
- dev_dbg(&dev->dev, "calling %pF\n", f->hook);
- #endif
- f->hook(dev);
- }
- f++;
- }
- }
-
- extern struct pci_fixup __start_pci_fixups_early[];
- extern struct pci_fixup __end_pci_fixups_early[];
- extern struct pci_fixup __start_pci_fixups_header[];
- extern struct pci_fixup __end_pci_fixups_header[];
- extern struct pci_fixup __start_pci_fixups_final[];
- extern struct pci_fixup __end_pci_fixups_final[];
- extern struct pci_fixup __start_pci_fixups_enable[];
- extern struct pci_fixup __end_pci_fixups_enable[];
- extern struct pci_fixup __start_pci_fixups_resume[];
- extern struct pci_fixup __end_pci_fixups_resume[];
- extern struct pci_fixup __start_pci_fixups_resume_early[];
- extern struct pci_fixup __end_pci_fixups_resume_early[];
- extern struct pci_fixup __start_pci_fixups_suspend[];
- extern struct pci_fixup __end_pci_fixups_suspend[];
-
-
- void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
- {
- struct pci_fixup *start, *end;
-
- switch(pass) {
- case pci_fixup_early:
- start = __start_pci_fixups_early;
- end = __end_pci_fixups_early;
- break;
-
- case pci_fixup_header:
- start = __start_pci_fixups_header;
- end = __end_pci_fixups_header;
- break;
-
- case pci_fixup_final:
- start = __start_pci_fixups_final;
- end = __end_pci_fixups_final;
- break;
-
- case pci_fixup_enable:
- start = __start_pci_fixups_enable;
- end = __end_pci_fixups_enable;
- break;
-
- case pci_fixup_resume:
- start = __start_pci_fixups_resume;
- end = __end_pci_fixups_resume;
- break;
-
- case pci_fixup_resume_early:
- start = __start_pci_fixups_resume_early;
- end = __end_pci_fixups_resume_early;
- break;
-
- case pci_fixup_suspend:
- start = __start_pci_fixups_suspend;
- end = __end_pci_fixups_suspend;
- break;
-
- default:
- /* stupid compiler warning, you would think with an enum... */
- return;
- }
- pci_do_fixups(dev, start, end);
- }
- EXPORT_SYMBOL(pci_fixup_device);
-
/* Enable 1k I/O space granularity on the Intel P64H2 */
static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
{
quirk_msi_intx_disable_bug);
#endif /* CONFIG_PCI_MSI */
+
+ static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)
+ {
+ while (f < end) {
+ if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
+ (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) {
+ dev_dbg(&dev->dev, "calling %pF\n", f->hook);
+ f->hook(dev);
+ }
+ f++;
+ }
+ }
+
+ extern struct pci_fixup __start_pci_fixups_early[];
+ extern struct pci_fixup __end_pci_fixups_early[];
+ extern struct pci_fixup __start_pci_fixups_header[];
+ extern struct pci_fixup __end_pci_fixups_header[];
+ extern struct pci_fixup __start_pci_fixups_final[];
+ extern struct pci_fixup __end_pci_fixups_final[];
+ extern struct pci_fixup __start_pci_fixups_enable[];
+ extern struct pci_fixup __end_pci_fixups_enable[];
+ extern struct pci_fixup __start_pci_fixups_resume[];
+ extern struct pci_fixup __end_pci_fixups_resume[];
+ extern struct pci_fixup __start_pci_fixups_resume_early[];
+ extern struct pci_fixup __end_pci_fixups_resume_early[];
+ extern struct pci_fixup __start_pci_fixups_suspend[];
+ extern struct pci_fixup __end_pci_fixups_suspend[];
+
+
+ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
+ {
+ struct pci_fixup *start, *end;
+
+ switch(pass) {
+ case pci_fixup_early:
+ start = __start_pci_fixups_early;
+ end = __end_pci_fixups_early;
+ break;
+
+ case pci_fixup_header:
+ start = __start_pci_fixups_header;
+ end = __end_pci_fixups_header;
+ break;
+
+ case pci_fixup_final:
+ start = __start_pci_fixups_final;
+ end = __end_pci_fixups_final;
+ break;
+
+ case pci_fixup_enable:
+ start = __start_pci_fixups_enable;
+ end = __end_pci_fixups_enable;
+ break;
+
+ case pci_fixup_resume:
+ start = __start_pci_fixups_resume;
+ end = __end_pci_fixups_resume;
+ break;
+
+ case pci_fixup_resume_early:
+ start = __start_pci_fixups_resume_early;
+ end = __end_pci_fixups_resume_early;
+ break;
+
+ case pci_fixup_suspend:
+ start = __start_pci_fixups_suspend;
+ end = __end_pci_fixups_suspend;
+ break;
+
+ default:
+ /* stupid compiler warning, you would think with an enum... */
+ return;
+ }
+ pci_do_fixups(dev, start, end);
+ }
+ #else
+ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
+ #endif
+ EXPORT_SYMBOL(pci_fixup_device);
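For readers tracking the code movement: each DECLARE_PCI_FIXUP_* use in this file emits a struct pci_fixup into a dedicated linker section, and pci_fixup_device() simply walks the section bounded by the matching __start/__end symbols for the requested pass. A simplified model of that machinery (the real macro lives in include/linux/pci.h; the layout here is illustrative):

	struct pci_fixup {
		u16 vendor;			/* PCI_ANY_ID matches everything */
		u16 device;
		void (*hook)(struct pci_dev *dev);
	};

	/*
	 * DECLARE_PCI_FIXUP_FINAL(v, d, fn) roughly expands to a static
	 * struct pci_fixup initializer placed in section .pci_fixup_final;
	 * the linker script defines __start_pci_fixups_final and
	 * __end_pci_fixups_final around all such entries, which is what
	 * pci_do_fixups() iterates above.
	 */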