1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2006, Intel Corporation.
5 * Copyright (C) 2006-2008 Intel Corporation
6 * Author: Ashok Raj <ashok.raj@intel.com>
7 * Author: Shaohua Li <shaohua.li@intel.com>
8 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
10 * This file implements early detection/parsing of Remapping Devices
11 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
14 * These routines are used by both DMA-remapping and Interrupt-remapping
17 #define pr_fmt(fmt) "DMAR: " fmt
19 #include <linux/pci.h>
20 #include <linux/dmar.h>
21 #include <linux/iova.h>
22 #include <linux/timer.h>
23 #include <linux/irq.h>
24 #include <linux/interrupt.h>
25 #include <linux/tboot.h>
26 #include <linux/dmi.h>
27 #include <linux/slab.h>
28 #include <linux/iommu.h>
29 #include <linux/numa.h>
30 #include <linux/limits.h>
31 #include <asm/irq_remapping.h>
34 #include "../irq_remapping.h"
39 typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
40 struct dmar_res_callback {
41 dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
42 void *arg[ACPI_DMAR_TYPE_RESERVED];
43 bool ignore_unhandled;
49 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
50 * before IO devices managed by that unit.
51 * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
52 * after IO devices managed by that unit.
53 * 3) Hotplug events are rare.
55 * Locking rules for DMA and interrupt remapping related global data structures:
56 * 1) Use dmar_global_lock in process context
57 * 2) Use RCU in interrupt context
59 DECLARE_RWSEM(dmar_global_lock);
60 LIST_HEAD(dmar_drhd_units);
62 struct acpi_table_header * __initdata dmar_tbl;
63 static int dmar_dev_scope_status = 1;
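/*
 * dmar_dev_scope_status tracks device-scope initialization: 1 means the
 * scan has not run yet, 0 means it completed successfully, and a negative
 * errno records the first failure (e.g. -ENODEV when no DRHD units exist,
 * -ENOMEM when allocating a notify info structure fails).
 */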
64 static DEFINE_IDA(dmar_seq_ids);
66 static int alloc_iommu(struct dmar_drhd_unit *drhd);
67 static void free_iommu(struct intel_iommu *iommu);
69 static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
72 * add INCLUDE_ALL at the tail, so scanning the list will find it at
75 if (drhd->include_all)
76 list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
78 list_add_rcu(&drhd->list, &dmar_drhd_units);
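/*
 * dmar_alloc_dev_scope - walk the device-scope entries between @start and
 * @end, count the entry types tracked here (ACPI namespace devices, PCI
 * endpoints and bridges), and allocate a dmar_dev_scope array of that size.
 * IOAPIC and HPET scopes are handled elsewhere; any other type is reported
 * as unsupported.
 */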
81 void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
83 struct acpi_dmar_device_scope *scope;
88 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
89 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
90 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
92 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
93 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
94 pr_warn("Unsupported device scope\n");
96 start += scope->length;
101 return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
104 void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
107 struct device *tmp_dev;
109 if (*devices && *cnt) {
110 for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
119 /* Optimize out kzalloc()/kfree() for normal cases */
120 static char dmar_pci_notify_info_buf[64];
122 static struct dmar_pci_notify_info *
123 dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
128 struct dmar_pci_notify_info *info;
130 BUG_ON(dev->is_virtfn);
133 * Ignore devices that have a domain number higher than what can
134 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
136 if (pci_domain_nr(dev->bus) > U16_MAX)
139 /* Only generate path[] for device addition event */
140 if (event == BUS_NOTIFY_ADD_DEVICE)
141 for (tmp = dev; tmp; tmp = tmp->bus->self)
144 size = struct_size(info, path, level);
145 if (size <= sizeof(dmar_pci_notify_info_buf)) {
146 info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
148 info = kzalloc(size, GFP_KERNEL);
150 if (dmar_dev_scope_status == 0)
151 dmar_dev_scope_status = -ENOMEM;
158 info->seg = pci_domain_nr(dev->bus);
160 if (event == BUS_NOTIFY_ADD_DEVICE) {
161 for (tmp = dev; tmp; tmp = tmp->bus->self) {
163 info->path[level].bus = tmp->bus->number;
164 info->path[level].device = PCI_SLOT(tmp->devfn);
165 info->path[level].function = PCI_FUNC(tmp->devfn);
166 if (pci_is_root_bus(tmp->bus))
167 info->bus = tmp->bus->number;
174 static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
176 if ((void *)info != dmar_pci_notify_info_buf)
180 static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
181 struct acpi_dmar_pci_path *path, int count)
185 if (info->bus != bus)
187 if (info->level != count)
190 for (i = 0; i < count; i++) {
191 if (path[i].device != info->path[i].device ||
192 path[i].function != info->path[i].function)
204 if (bus == info->path[i].bus &&
205 path[0].device == info->path[i].device &&
206 path[0].function == info->path[i].function) {
207 pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
208 bus, path[0].device, path[0].function);
215 /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
216 int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
217 void *start, void *end, u16 segment,
218 struct dmar_dev_scope *devices,
222 struct device *tmp, *dev = &info->dev->dev;
223 struct acpi_dmar_device_scope *scope;
224 struct acpi_dmar_pci_path *path;
226 if (segment != info->seg)
229 for (; start < end; start += scope->length) {
231 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
232 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
235 path = (struct acpi_dmar_pci_path *)(scope + 1);
236 level = (scope->length - sizeof(*scope)) / sizeof(*path);
237 if (!dmar_match_pci_path(info, scope->bus, path, level))
241 * We expect devices with endpoint scope to have normal PCI
242 * headers, and devices with bridge scope to have bridge PCI
243 * headers. However PCI NTB devices may be listed in the
244 * DMAR table with bridge scope, even though they have a
245 * normal PCI header. NTB devices are identified by class
246 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
247 * for this special case.
249 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
250 info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
251 (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
252 (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
253 info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
254 pr_warn("Device scope type does not match for %s\n",
255 pci_name(info->dev));
259 for_each_dev_scope(devices, devices_cnt, i, tmp)
261 devices[i].bus = info->dev->bus->number;
262 devices[i].devfn = info->dev->devfn;
263 rcu_assign_pointer(devices[i].dev,
267 BUG_ON(i >= devices_cnt);
273 int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
274 struct dmar_dev_scope *devices, int count)
279 if (info->seg != segment)
282 for_each_active_dev_scope(devices, count, index, tmp)
283 if (tmp == &info->dev->dev) {
284 RCU_INIT_POINTER(devices[index].dev, NULL);
293 static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
296 struct dmar_drhd_unit *dmaru;
297 struct acpi_dmar_hardware_unit *drhd;
299 for_each_drhd_unit(dmaru) {
300 if (dmaru->include_all)
303 drhd = container_of(dmaru->hdr,
304 struct acpi_dmar_hardware_unit, header);
305 ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
306 ((void *)drhd) + drhd->header.length,
308 dmaru->devices, dmaru->devices_cnt);
313 ret = dmar_iommu_notify_scope_dev(info);
314 if (ret < 0 && dmar_dev_scope_status == 0)
315 dmar_dev_scope_status = ret;
318 intel_irq_remap_add_device(info);
323 static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
325 struct dmar_drhd_unit *dmaru;
327 for_each_drhd_unit(dmaru)
328 if (dmar_remove_dev_scope(info, dmaru->segment,
329 dmaru->devices, dmaru->devices_cnt))
331 dmar_iommu_notify_scope_dev(info);
334 static inline void vf_inherit_msi_domain(struct pci_dev *pdev)
336 struct pci_dev *physfn = pci_physfn(pdev);
338 dev_set_msi_domain(&pdev->dev, dev_get_msi_domain(&physfn->dev));
341 static int dmar_pci_bus_notifier(struct notifier_block *nb,
342 unsigned long action, void *data)
344 struct pci_dev *pdev = to_pci_dev(data);
345 struct dmar_pci_notify_info *info;
347 /* Only care about add/remove events for physical functions.
348 * For VFs we actually do the lookup based on the corresponding
349 * PF in device_to_iommu() anyway. */
350 if (pdev->is_virtfn) {
352 * Ensure that the VF device inherits the irq domain of the
353 * PF device. Ideally the device would inherit the domain
354 * from the bus, but DMAR can have multiple units per bus
355 * which makes this impossible. The VF 'bus' could inherit
356 * from the PF device, but that's yet another x86'sism to
357 * inflict on everybody else.
359 if (action == BUS_NOTIFY_ADD_DEVICE)
360 vf_inherit_msi_domain(pdev);
364 if (action != BUS_NOTIFY_ADD_DEVICE &&
365 action != BUS_NOTIFY_REMOVED_DEVICE)
368 info = dmar_alloc_pci_notify_info(pdev, action);
372 down_write(&dmar_global_lock);
373 if (action == BUS_NOTIFY_ADD_DEVICE)
374 dmar_pci_bus_add_dev(info);
375 else if (action == BUS_NOTIFY_REMOVED_DEVICE)
376 dmar_pci_bus_del_dev(info);
377 up_write(&dmar_global_lock);
379 dmar_free_pci_notify_info(info);
384 static struct notifier_block dmar_pci_bus_nb = {
385 .notifier_call = dmar_pci_bus_notifier,
389 static struct dmar_drhd_unit *
390 dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
392 struct dmar_drhd_unit *dmaru;
394 list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
396 if (dmaru->segment == drhd->segment &&
397 dmaru->reg_base_addr == drhd->address)
404 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
405 * structure which uniquely represents one DMA remapping hardware unit
406 * present in the platform
408 static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
410 struct acpi_dmar_hardware_unit *drhd;
411 struct dmar_drhd_unit *dmaru;
414 drhd = (struct acpi_dmar_hardware_unit *)header;
415 dmaru = dmar_find_dmaru(drhd);
419 dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
424 * If header is allocated from slab by ACPI _DSM method, we need to
425 * copy the content because the memory buffer will be freed on return.
427 dmaru->hdr = (void *)(dmaru + 1);
428 memcpy(dmaru->hdr, header, header->length);
429 dmaru->reg_base_addr = drhd->address;
430 dmaru->segment = drhd->segment;
431 /* The size of the register set is 2 ^ N 4 KB pages. */
432 dmaru->reg_size = 1UL << (drhd->size + 12);
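/*
 * For example, drhd->size == 0 describes the minimal 4KB register window,
 * while drhd->size == 2 describes a 16KB window.
 */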
433 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
434 dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
435 ((void *)drhd) + drhd->header.length,
436 &dmaru->devices_cnt);
437 if (dmaru->devices_cnt && dmaru->devices == NULL) {
442 ret = alloc_iommu(dmaru);
444 dmar_free_dev_scope(&dmaru->devices,
445 &dmaru->devices_cnt);
449 dmar_register_drhd_unit(dmaru);
458 static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
460 if (dmaru->devices && dmaru->devices_cnt)
461 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
463 free_iommu(dmaru->iommu);
467 static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
470 struct acpi_dmar_andd *andd = (void *)header;
472 /* Check for NUL termination within the designated length */
473 if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
475 "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
476 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
477 dmi_get_system_info(DMI_BIOS_VENDOR),
478 dmi_get_system_info(DMI_BIOS_VERSION),
479 dmi_get_system_info(DMI_PRODUCT_VERSION));
480 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
483 pr_info("ANDD device: %x name: %s\n", andd->device_number,
489 #ifdef CONFIG_ACPI_NUMA
490 static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
492 struct acpi_dmar_rhsa *rhsa;
493 struct dmar_drhd_unit *drhd;
495 rhsa = (struct acpi_dmar_rhsa *)header;
496 for_each_drhd_unit(drhd) {
497 if (drhd->reg_base_addr == rhsa->base_address) {
498 int node = pxm_to_node(rhsa->proximity_domain);
500 if (node != NUMA_NO_NODE && !node_online(node))
502 drhd->iommu->node = node;
507 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
508 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
510 dmi_get_system_info(DMI_BIOS_VENDOR),
511 dmi_get_system_info(DMI_BIOS_VERSION),
512 dmi_get_system_info(DMI_PRODUCT_VERSION));
513 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
518 #define dmar_parse_one_rhsa dmar_res_noop
522 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
524 struct acpi_dmar_hardware_unit *drhd;
525 struct acpi_dmar_reserved_memory *rmrr;
526 struct acpi_dmar_atsr *atsr;
527 struct acpi_dmar_rhsa *rhsa;
528 struct acpi_dmar_satc *satc;
530 switch (header->type) {
531 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
532 drhd = container_of(header, struct acpi_dmar_hardware_unit,
534 pr_info("DRHD base: %#016Lx flags: %#x\n",
535 (unsigned long long)drhd->address, drhd->flags);
537 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
538 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
540 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
541 (unsigned long long)rmrr->base_address,
542 (unsigned long long)rmrr->end_address);
544 case ACPI_DMAR_TYPE_ROOT_ATS:
545 atsr = container_of(header, struct acpi_dmar_atsr, header);
546 pr_info("ATSR flags: %#x\n", atsr->flags);
548 case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
549 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
550 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
551 (unsigned long long)rhsa->base_address,
552 rhsa->proximity_domain);
554 case ACPI_DMAR_TYPE_NAMESPACE:
555 /* We don't print this here because we need to sanity-check
556 it first. So print it in dmar_parse_one_andd() instead. */
558 case ACPI_DMAR_TYPE_SATC:
559 satc = container_of(header, struct acpi_dmar_satc, header);
560 pr_info("SATC flags: 0x%x\n", satc->flags);
566 * dmar_table_detect - checks to see if the platform supports DMAR devices
568 static int __init dmar_table_detect(void)
570 acpi_status status = AE_OK;
572 /* if we can find the DMAR table, then there are DMAR devices */
573 status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
575 if (ACPI_SUCCESS(status) && !dmar_tbl) {
576 pr_warn("Unable to map DMAR\n");
577 status = AE_NOT_FOUND;
580 return ACPI_SUCCESS(status) ? 0 : -ENOENT;
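/*
 * dmar_walk_remapping_entries - iterate over the variable-length remapping
 * structures in [start, start + len) and dispatch each record to the
 * handler registered for its type in @cb. Zero-length or truncated records
 * from a buggy ACPI table terminate the walk instead of looping forever or
 * running past the end of the buffer.
 */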
583 static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
584 size_t len, struct dmar_res_callback *cb)
586 struct acpi_dmar_header *iter, *next;
587 struct acpi_dmar_header *end = ((void *)start) + len;
589 for (iter = start; iter < end; iter = next) {
590 next = (void *)iter + iter->length;
591 if (iter->length == 0) {
592 /* Avoid looping forever on bad ACPI tables */
593 pr_debug(FW_BUG "Invalid 0-length structure\n");
595 } else if (next > end) {
596 /* Avoid passing table end */
597 pr_warn(FW_BUG "Record passes table end\n");
602 dmar_table_print_dmar_entry(iter);
604 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
605 /* continue for forward compatibility */
606 pr_debug("Unknown DMAR structure type %d\n",
608 } else if (cb->cb[iter->type]) {
611 ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
614 } else if (!cb->ignore_unhandled) {
615 pr_warn("No handler for DMAR structure type %d\n",
624 static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
625 struct dmar_res_callback *cb)
627 return dmar_walk_remapping_entries((void *)(dmar + 1),
628 dmar->header.length - sizeof(*dmar), cb);
632 * parse_dmar_table - parses the DMA reporting table
635 parse_dmar_table(void)
637 struct acpi_table_dmar *dmar;
640 struct dmar_res_callback cb = {
642 .ignore_unhandled = true,
643 .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
644 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
645 .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
646 .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
647 .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
648 .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
649 .cb[ACPI_DMAR_TYPE_SATC] = &dmar_parse_one_satc,
653 * Do it again, earlier dmar_tbl mapping could be mapped with
659 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
660 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
662 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
664 dmar = (struct acpi_table_dmar *)dmar_tbl;
668 if (dmar->width < PAGE_SHIFT - 1) {
669 pr_warn("Invalid DMAR haw\n");
673 pr_info("Host address width %d\n", dmar->width + 1);
674 ret = dmar_walk_dmar_table(dmar, &cb);
675 if (ret == 0 && drhd_count == 0)
676 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
681 static int dmar_pci_device_match(struct dmar_dev_scope devices[],
682 int cnt, struct pci_dev *dev)
688 for_each_active_dev_scope(devices, cnt, index, tmp)
689 if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
692 /* Check our parent */
693 dev = dev->bus->self;
699 struct dmar_drhd_unit *
700 dmar_find_matched_drhd_unit(struct pci_dev *dev)
702 struct dmar_drhd_unit *dmaru;
703 struct acpi_dmar_hardware_unit *drhd;
705 dev = pci_physfn(dev);
708 for_each_drhd_unit(dmaru) {
709 drhd = container_of(dmaru->hdr,
710 struct acpi_dmar_hardware_unit,
713 if (dmaru->include_all &&
714 drhd->segment == pci_domain_nr(dev->bus))
717 if (dmar_pci_device_match(dmaru->devices,
718 dmaru->devices_cnt, dev))
728 static void __init dmar_acpi_insert_dev_scope(u8 device_number,
729 struct acpi_device *adev)
731 struct dmar_drhd_unit *dmaru;
732 struct acpi_dmar_hardware_unit *drhd;
733 struct acpi_dmar_device_scope *scope;
736 struct acpi_dmar_pci_path *path;
738 for_each_drhd_unit(dmaru) {
739 drhd = container_of(dmaru->hdr,
740 struct acpi_dmar_hardware_unit,
743 for (scope = (void *)(drhd + 1);
744 (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
745 scope = ((void *)scope) + scope->length) {
746 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
748 if (scope->enumeration_id != device_number)
751 path = (void *)(scope + 1);
752 pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
753 dev_name(&adev->dev), dmaru->reg_base_addr,
754 scope->bus, path->device, path->function);
755 for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
757 dmaru->devices[i].bus = scope->bus;
758 dmaru->devices[i].devfn = PCI_DEVFN(path->device,
760 rcu_assign_pointer(dmaru->devices[i].dev,
761 get_device(&adev->dev));
764 BUG_ON(i >= dmaru->devices_cnt);
767 pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
768 device_number, dev_name(&adev->dev));
771 static int __init dmar_acpi_dev_scope_init(void)
773 struct acpi_dmar_andd *andd;
775 if (dmar_tbl == NULL)
778 for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
779 ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
780 andd = ((void *)andd) + andd->header.length) {
781 if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
783 struct acpi_device *adev;
785 if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
788 pr_err("Failed to find handle for ACPI object %s\n",
792 adev = acpi_fetch_acpi_dev(h);
794 pr_err("Failed to get device for ACPI object %s\n",
798 dmar_acpi_insert_dev_scope(andd->device_number, adev);
804 int __init dmar_dev_scope_init(void)
806 struct pci_dev *dev = NULL;
807 struct dmar_pci_notify_info *info;
809 if (dmar_dev_scope_status != 1)
810 return dmar_dev_scope_status;
812 if (list_empty(&dmar_drhd_units)) {
813 dmar_dev_scope_status = -ENODEV;
815 dmar_dev_scope_status = 0;
817 dmar_acpi_dev_scope_init();
819 for_each_pci_dev(dev) {
823 info = dmar_alloc_pci_notify_info(dev,
824 BUS_NOTIFY_ADD_DEVICE);
827 return dmar_dev_scope_status;
829 dmar_pci_bus_add_dev(info);
830 dmar_free_pci_notify_info(info);
835 return dmar_dev_scope_status;
838 void __init dmar_register_bus_notifier(void)
840 bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
844 int __init dmar_table_init(void)
846 static int dmar_table_initialized;
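/*
 * dmar_table_initialized caches the outcome of the first call: 0 means the
 * table has not been parsed yet, 1 means parsing succeeded, and a negative
 * errno is remembered so that later callers see the same failure.
 */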
849 if (dmar_table_initialized == 0) {
850 ret = parse_dmar_table();
853 pr_info("Parse DMAR table failure.\n");
854 } else if (list_empty(&dmar_drhd_units)) {
855 pr_info("No DMAR devices found\n");
860 dmar_table_initialized = ret;
862 dmar_table_initialized = 1;
865 return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
868 static void warn_invalid_dmar(u64 addr, const char *message)
871 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
872 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
874 dmi_get_system_info(DMI_BIOS_VENDOR),
875 dmi_get_system_info(DMI_BIOS_VERSION),
876 dmi_get_system_info(DMI_PRODUCT_VERSION));
877 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
881 dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
883 struct acpi_dmar_hardware_unit *drhd;
887 drhd = (void *)entry;
888 if (!drhd->address) {
889 warn_invalid_dmar(0, "");
894 addr = ioremap(drhd->address, VTD_PAGE_SIZE);
896 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
898 pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
902 cap = dmar_readq(addr + DMAR_CAP_REG);
903 ecap = dmar_readq(addr + DMAR_ECAP_REG);
908 early_iounmap(addr, VTD_PAGE_SIZE);
910 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
911 warn_invalid_dmar(drhd->address, " returns all ones");
918 void __init detect_intel_iommu(void)
921 struct dmar_res_callback validate_drhd_cb = {
922 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
923 .ignore_unhandled = true,
926 down_write(&dmar_global_lock);
927 ret = dmar_table_detect();
929 ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
931 if (!ret && !no_iommu && !iommu_detected &&
932 (!dmar_disabled || dmar_platform_optin())) {
934 /* Make sure ACS will be enabled */
940 x86_init.iommu.iommu_init = intel_iommu_init;
941 x86_platform.iommu_shutdown = intel_iommu_shutdown;
947 acpi_put_table(dmar_tbl);
950 up_write(&dmar_global_lock);
953 static void unmap_iommu(struct intel_iommu *iommu)
956 release_mem_region(iommu->reg_phys, iommu->reg_size);
960 * map_iommu: map the iommu's registers
961 * @iommu: the iommu to map
962 * @drhd: DMA remapping hardware definition structure
964 * Memory map the iommu's registers. Start w/ a single page, and
965 * possibly expand if that turns out to be insufficient.
967 static int map_iommu(struct intel_iommu *iommu, struct dmar_drhd_unit *drhd)
969 u64 phys_addr = drhd->reg_base_addr;
972 iommu->reg_phys = phys_addr;
973 iommu->reg_size = drhd->reg_size;
975 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
976 pr_err("Can't reserve memory\n");
981 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
983 pr_err("Can't map the region\n");
988 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
989 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
991 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
993 warn_invalid_dmar(phys_addr, " returns all ones");
996 if (ecap_vcs(iommu->ecap))
997 iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
999 /* the registers might be more than one page */
1000 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
1001 cap_max_fault_reg_offset(iommu->cap));
1002 map_size = VTD_PAGE_ALIGN(map_size);
1003 if (map_size > iommu->reg_size) {
1004 iounmap(iommu->reg);
1005 release_mem_region(iommu->reg_phys, iommu->reg_size);
1006 iommu->reg_size = map_size;
1007 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
1009 pr_err("Can't reserve memory\n");
1013 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
1015 pr_err("Can't map the region\n");
1021 if (cap_ecmds(iommu->cap)) {
1024 for (i = 0; i < DMA_MAX_NUM_ECMDCAP; i++) {
1025 iommu->ecmdcap[i] = dmar_readq(iommu->reg + DMAR_ECCAP_REG +
1026 i * DMA_ECMD_REG_STEP);
1034 iounmap(iommu->reg);
1036 release_mem_region(iommu->reg_phys, iommu->reg_size);
1041 static int alloc_iommu(struct dmar_drhd_unit *drhd)
1043 struct intel_iommu *iommu;
1049 if (!drhd->reg_base_addr) {
1050 warn_invalid_dmar(0, "");
1054 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1058 iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0,
1059 DMAR_UNITS_SUPPORTED - 1, GFP_KERNEL);
1060 if (iommu->seq_id < 0) {
1061 pr_err("Failed to allocate seq_id\n");
1062 err = iommu->seq_id;
1065 sprintf(iommu->name, "dmar%d", iommu->seq_id);
1067 err = map_iommu(iommu, drhd);
1069 pr_err("Failed to map %s\n", iommu->name);
1070 goto error_free_seq_id;
1074 if (!cap_sagaw(iommu->cap) &&
1075 (!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
1076 pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
1081 if (!drhd->ignored) {
1082 agaw = iommu_calculate_agaw(iommu);
1084 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1089 if (!drhd->ignored) {
1090 msagaw = iommu_calculate_max_sagaw(iommu);
1092 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1099 iommu->msagaw = msagaw;
1100 iommu->segment = drhd->segment;
1102 iommu->node = NUMA_NO_NODE;
1104 ver = readl(iommu->reg + DMAR_VER_REG);
1105 pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1107 (unsigned long long)drhd->reg_base_addr,
1108 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1109 (unsigned long long)iommu->cap,
1110 (unsigned long long)iommu->ecap);
1112 /* Reflect status in gcmd */
1113 sts = readl(iommu->reg + DMAR_GSTS_REG);
1114 if (sts & DMA_GSTS_IRES)
1115 iommu->gcmd |= DMA_GCMD_IRE;
1116 if (sts & DMA_GSTS_TES)
1117 iommu->gcmd |= DMA_GCMD_TE;
1118 if (sts & DMA_GSTS_QIES)
1119 iommu->gcmd |= DMA_GCMD_QIE;
1121 if (alloc_iommu_pmu(iommu))
1122 pr_debug("Cannot alloc PMU for iommu (seq_id = %d)\n", iommu->seq_id);
1124 raw_spin_lock_init(&iommu->register_lock);
1127 * A value of N in the PSS field of the ECAP register indicates that the
1128 * hardware supports a PASID field of N+1 bits.
1130 if (pasid_supported(iommu))
1131 iommu->iommu.max_pasids = 2UL << ecap_pss(iommu->ecap);
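/*
 * Illustrative example: a PSS value of 19 means 20-bit PASIDs, so
 * max_pasids becomes 2UL << 19 == 1 << 20, i.e. roughly one million
 * PASIDs.
 */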
1134 * This is only for hotplug; at boot time intel_iommu_enabled won't
1135 * be set yet. When intel_iommu_init() runs, it registers the units
1136 * present at boot time, then sets intel_iommu_enabled.
1138 if (intel_iommu_enabled && !drhd->ignored) {
1139 err = iommu_device_sysfs_add(&iommu->iommu, NULL,
1145 err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
1149 iommu_pmu_register(iommu);
1152 drhd->iommu = iommu;
1158 iommu_device_sysfs_remove(&iommu->iommu);
1160 free_iommu_pmu(iommu);
1163 ida_free(&dmar_seq_ids, iommu->seq_id);
1169 static void free_iommu(struct intel_iommu *iommu)
1171 if (intel_iommu_enabled && !iommu->drhd->ignored) {
1172 iommu_pmu_unregister(iommu);
1173 iommu_device_unregister(&iommu->iommu);
1174 iommu_device_sysfs_remove(&iommu->iommu);
1177 free_iommu_pmu(iommu);
1180 if (iommu->pr_irq) {
1181 free_irq(iommu->pr_irq, iommu);
1182 dmar_free_hwirq(iommu->pr_irq);
1185 free_irq(iommu->irq, iommu);
1186 dmar_free_hwirq(iommu->irq);
1191 free_page((unsigned long)iommu->qi->desc);
1192 kfree(iommu->qi->desc_status);
1199 ida_free(&dmar_seq_ids, iommu->seq_id);
1204 * Reclaim all the submitted descriptors which have completed their work.
1206 static inline void reclaim_free_desc(struct q_inval *qi)
1208 while (qi->desc_status[qi->free_tail] == QI_DONE ||
1209 qi->desc_status[qi->free_tail] == QI_ABORT) {
1210 qi->desc_status[qi->free_tail] = QI_FREE;
1211 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
1216 static const char *qi_type_string(u8 type)
1220 return "Context-cache Invalidation";
1222 return "IOTLB Invalidation";
1223 case QI_DIOTLB_TYPE:
1224 return "Device-TLB Invalidation";
1226 return "Interrupt Entry Cache Invalidation";
1228 return "Invalidation Wait";
1229 case QI_EIOTLB_TYPE:
1230 return "PASID-based IOTLB Invalidation";
1232 return "PASID-cache Invalidation";
1233 case QI_DEIOTLB_TYPE:
1234 return "PASID-based Device-TLB Invalidation";
1235 case QI_PGRP_RESP_TYPE:
1236 return "Page Group Response";
1242 static void qi_dump_fault(struct intel_iommu *iommu, u32 fault)
1244 unsigned int head = dmar_readl(iommu->reg + DMAR_IQH_REG);
1245 u64 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
1246 struct qi_desc *desc = iommu->qi->desc + head;
1248 if (fault & DMA_FSTS_IQE)
1249 pr_err("VT-d detected Invalidation Queue Error: Reason %llx",
1250 DMAR_IQER_REG_IQEI(iqe_err));
1251 if (fault & DMA_FSTS_ITE)
1252 pr_err("VT-d detected Invalidation Time-out Error: SID %llx",
1253 DMAR_IQER_REG_ITESID(iqe_err));
1254 if (fault & DMA_FSTS_ICE)
1255 pr_err("VT-d detected Invalidation Completion Error: SID %llx",
1256 DMAR_IQER_REG_ICESID(iqe_err));
1258 pr_err("QI HEAD: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
1259 qi_type_string(desc->qw0 & 0xf),
1260 (unsigned long long)desc->qw0,
1261 (unsigned long long)desc->qw1);
1263 head = ((head >> qi_shift(iommu)) + QI_LENGTH - 1) % QI_LENGTH;
1264 head <<= qi_shift(iommu);
1265 desc = iommu->qi->desc + head;
1267 pr_err("QI PRIOR: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
1268 qi_type_string(desc->qw0 & 0xf),
1269 (unsigned long long)desc->qw0,
1270 (unsigned long long)desc->qw1);
1273 static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
1277 struct q_inval *qi = iommu->qi;
1278 int shift = qi_shift(iommu);
1280 if (qi->desc_status[wait_index] == QI_ABORT)
1283 fault = readl(iommu->reg + DMAR_FSTS_REG);
1284 if (fault & (DMA_FSTS_IQE | DMA_FSTS_ITE | DMA_FSTS_ICE))
1285 qi_dump_fault(iommu, fault);
1288 * If IQE happens, the head points to the descriptor associated
1289 * with the error. No new descriptors are fetched until the IQE
1292 if (fault & DMA_FSTS_IQE) {
1293 head = readl(iommu->reg + DMAR_IQH_REG);
1294 if ((head >> shift) == index) {
1295 struct qi_desc *desc = qi->desc + head;
1298 * desc->qw2 and desc->qw3 are either reserved or
1299 * used by software as private data. We won't print
1300 * out these two qw's for security considerations.
1302 memcpy(desc, qi->desc + (wait_index << shift),
1304 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1305 pr_info("Invalidation Queue Error (IQE) cleared\n");
1311 * If ITE happens, all pending wait_desc commands are aborted.
1312 * No new descriptors are fetched until the ITE is cleared.
1314 if (fault & DMA_FSTS_ITE) {
1315 head = readl(iommu->reg + DMAR_IQH_REG);
1316 head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1318 tail = readl(iommu->reg + DMAR_IQT_REG);
1319 tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1321 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1322 pr_info("Invalidation Time-out Error (ITE) cleared\n");
1325 if (qi->desc_status[head] == QI_IN_USE)
1326 qi->desc_status[head] = QI_ABORT;
1327 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1328 } while (head != tail);
1330 if (qi->desc_status[wait_index] == QI_ABORT)
1334 if (fault & DMA_FSTS_ICE) {
1335 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1336 pr_info("Invalidation Completion Error (ICE) cleared\n");
1343 * Function to submit invalidation descriptors of all types to the queued
1344 * invalidation interface (QI). Multiple descriptors can be submitted at a
1345 * time; a wait descriptor is appended to each submission to ensure that
1346 * hardware has completed the invalidation before returning. Wait descriptors
1347 * can be part of the submission, but they will not be polled for completion.
1349 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
1350 unsigned int count, unsigned long options)
1352 struct q_inval *qi = iommu->qi;
1353 s64 devtlb_start_ktime = 0;
1354 s64 iotlb_start_ktime = 0;
1355 s64 iec_start_ktime = 0;
1356 struct qi_desc wait_desc;
1357 int wait_index, index;
1358 unsigned long flags;
1366 type = desc->qw0 & GENMASK_ULL(3, 0);
1368 if ((type == QI_IOTLB_TYPE || type == QI_EIOTLB_TYPE) &&
1369 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IOTLB))
1370 iotlb_start_ktime = ktime_to_ns(ktime_get());
1372 if ((type == QI_DIOTLB_TYPE || type == QI_DEIOTLB_TYPE) &&
1373 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_DEVTLB))
1374 devtlb_start_ktime = ktime_to_ns(ktime_get());
1376 if (type == QI_IEC_TYPE &&
1377 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IEC))
1378 iec_start_ktime = ktime_to_ns(ktime_get());
1383 raw_spin_lock_irqsave(&qi->q_lock, flags);
1385 * Check if we have enough empty slots in the queue to submit,
1386 * the calculation is based on:
1387 * # of desc + 1 wait desc + 1 space between head and tail
1389 while (qi->free_cnt < count + 2) {
1390 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1392 raw_spin_lock_irqsave(&qi->q_lock, flags);
1395 index = qi->free_head;
1396 wait_index = (index + count) % QI_LENGTH;
1397 shift = qi_shift(iommu);
1399 for (i = 0; i < count; i++) {
1400 offset = ((index + i) % QI_LENGTH) << shift;
1401 memcpy(qi->desc + offset, &desc[i], 1 << shift);
1402 qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
1403 trace_qi_submit(iommu, desc[i].qw0, desc[i].qw1,
1404 desc[i].qw2, desc[i].qw3);
1406 qi->desc_status[wait_index] = QI_IN_USE;
1408 wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
1409 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
1410 if (options & QI_OPT_WAIT_DRAIN)
1411 wait_desc.qw0 |= QI_IWD_PRQ_DRAIN;
1412 wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
1416 offset = wait_index << shift;
1417 memcpy(qi->desc + offset, &wait_desc, 1 << shift);
1419 qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
1420 qi->free_cnt -= count + 1;
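/*
 * Illustrative example: with QI_LENGTH == 256 and free_head == 254,
 * submitting count == 2 descriptors places them in slots 254 and 255,
 * puts the wait descriptor in slot 0 (wait_index == (254 + 2) % 256),
 * and leaves free_head == 1 with free_cnt reduced by 3.
 */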
1423 * update the HW tail register indicating the presence of
1426 writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
1428 while (qi->desc_status[wait_index] != QI_DONE) {
1430 * We will leave the interrupts disabled, to prevent the interrupt
1431 * context from queueing another cmd while a cmd is already submitted
1432 * and waiting for completion on this CPU. This avoids a deadlock
1433 * where the interrupt context could wait indefinitely for free
1434 * slots in the queue.
1436 rc = qi_check_fault(iommu, index, wait_index);
1440 raw_spin_unlock(&qi->q_lock);
1442 raw_spin_lock(&qi->q_lock);
1445 for (i = 0; i < count; i++)
1446 qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE;
1448 reclaim_free_desc(qi);
1449 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1454 if (iotlb_start_ktime)
1455 dmar_latency_update(iommu, DMAR_LATENCY_INV_IOTLB,
1456 ktime_to_ns(ktime_get()) - iotlb_start_ktime);
1458 if (devtlb_start_ktime)
1459 dmar_latency_update(iommu, DMAR_LATENCY_INV_DEVTLB,
1460 ktime_to_ns(ktime_get()) - devtlb_start_ktime);
1462 if (iec_start_ktime)
1463 dmar_latency_update(iommu, DMAR_LATENCY_INV_IEC,
1464 ktime_to_ns(ktime_get()) - iec_start_ktime);
1470 * Flush the global interrupt entry cache.
1472 void qi_global_iec(struct intel_iommu *iommu)
1474 struct qi_desc desc;
1476 desc.qw0 = QI_IEC_TYPE;
1481 /* should never fail */
1482 qi_submit_sync(iommu, &desc, 1, 0);
1485 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1488 struct qi_desc desc;
1490 desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1491 | QI_CC_GRAN(type) | QI_CC_TYPE;
1496 qi_submit_sync(iommu, &desc, 1, 0);
1499 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1500 unsigned int size_order, u64 type)
1504 struct qi_desc desc;
1507 if (cap_write_drain(iommu->cap))
1510 if (cap_read_drain(iommu->cap))
1513 desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1514 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1515 desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1516 | QI_IOTLB_AM(size_order);
1520 qi_submit_sync(iommu, &desc, 1, 0);
1523 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1524 u16 qdep, u64 addr, unsigned mask)
1526 struct qi_desc desc;
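/*
 * A non-zero mask requests invalidation of 2^mask pages. Per the VT-d
 * Device-TLB invalidate descriptor format, the size is encoded in the low
 * address bits: bits below (VTD_PAGE_SHIFT + mask - 1) are forced to 1 so
 * that, for a naturally aligned target, the least significant zero bit
 * marks the range (e.g. mask == 1 describes an 8KB region) and the S bit
 * is set.
 */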
1529 addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1530 desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1532 desc.qw1 = QI_DEV_IOTLB_ADDR(addr);
1534 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1537 desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1538 QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
1542 qi_submit_sync(iommu, &desc, 1, 0);
1545 /* PASID-based IOTLB invalidation */
1546 void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
1547 unsigned long npages, bool ih)
1549 struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
1552 * npages == -1 means a PASID-selective invalidation; otherwise a
1553 * positive value requests a page-selective-within-PASID invalidation.
1554 * 0 is not a valid input.
1556 if (WARN_ON(!npages)) {
1557 pr_err("Invalid input npages = %ld\n", npages);
1562 desc.qw0 = QI_EIOTLB_PASID(pasid) |
1563 QI_EIOTLB_DID(did) |
1564 QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
1568 int mask = ilog2(__roundup_pow_of_two(npages));
1569 unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
1571 if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
1572 addr = ALIGN_DOWN(addr, align);
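/*
 * Illustrative example: npages == 3 rounds up to 4, so mask == 2 and
 * align == 16KB; the address is aligned down to a 16KB boundary and the
 * AM field of the descriptor asks hardware to invalidate 2^2 = 4 pages.
 */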
1574 desc.qw0 = QI_EIOTLB_PASID(pasid) |
1575 QI_EIOTLB_DID(did) |
1576 QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
1578 desc.qw1 = QI_EIOTLB_ADDR(addr) |
1583 qi_submit_sync(iommu, &desc, 1, 0);
1586 /* PASID-based device IOTLB Invalidate */
1587 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1588 u32 pasid, u16 qdep, u64 addr, unsigned int size_order)
1590 unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
1591 struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
1593 desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
1594 QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
1595 QI_DEV_IOTLB_PFSID(pfsid);
1598 * If the S bit is 0, we only flush a single page. If the S bit is set,
1599 * the least significant zero bit indicates the invalidation address
1600 * range (VT-d spec 6.5.2.6),
1601 * e.g. address bit 12 being 0 indicates 8KB, bit 13 being 0 indicates 16KB.
1602 * size_order = 0 means PAGE_SIZE (4KB).
1603 * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
1606 if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
1607 pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
1610 /* Take page address */
1611 desc.qw1 = QI_DEV_EIOTLB_ADDR(addr);
1615 * Existing 0s in the address below size_order may be the least
1616 * significant zero bit; we must set them to 1s to avoid indicating
1617 * a smaller size than desired.
1619 desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
1621 /* Clear size_order bit to indicate size */
1623 /* Set the S bit to indicate flushing more than 1 page */
1624 desc.qw1 |= QI_DEV_EIOTLB_SIZE;
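/*
 * Illustrative example: size_order == 2 (16KB) sets address bits 13:12,
 * then clears bit 13 and sets the S bit, so the least significant zero
 * bit (13) describes a 16KB range; size_order == 1 leaves bit 12 clear
 * for an 8KB range.
 */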
1627 qi_submit_sync(iommu, &desc, 1, 0);
1630 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
1631 u64 granu, u32 pasid)
1633 struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
1635 desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) |
1636 QI_PC_GRAN(granu) | QI_PC_TYPE;
1637 qi_submit_sync(iommu, &desc, 1, 0);
1641 * Disable Queued Invalidation interface.
1643 void dmar_disable_qi(struct intel_iommu *iommu)
1645 unsigned long flags;
1647 cycles_t start_time = get_cycles();
1649 if (!ecap_qis(iommu->ecap))
1652 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1654 sts = readl(iommu->reg + DMAR_GSTS_REG);
1655 if (!(sts & DMA_GSTS_QIES))
1659 * Give the HW a chance to complete the pending invalidation requests.
1661 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1662 readl(iommu->reg + DMAR_IQH_REG)) &&
1663 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1666 iommu->gcmd &= ~DMA_GCMD_QIE;
1667 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1669 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1670 !(sts & DMA_GSTS_QIES), sts);
1672 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1676 * Enable queued invalidation.
1678 static void __dmar_enable_qi(struct intel_iommu *iommu)
1681 unsigned long flags;
1682 struct q_inval *qi = iommu->qi;
1683 u64 val = virt_to_phys(qi->desc);
1685 qi->free_head = qi->free_tail = 0;
1686 qi->free_cnt = QI_LENGTH;
1689 * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability
1692 if (ecap_smts(iommu->ecap))
1693 val |= (1 << 11) | 1;
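/*
 * Bit 11 (DW) selects 256-bit descriptors and QS == 1 describes a
 * two-page (8KB) queue, matching the two pages allocated in
 * dmar_enable_qi() for 256 scalable-mode descriptors.
 */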
1695 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1697 /* write zero to the tail reg */
1698 writel(0, iommu->reg + DMAR_IQT_REG);
1700 dmar_writeq(iommu->reg + DMAR_IQA_REG, val);
1702 iommu->gcmd |= DMA_GCMD_QIE;
1703 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1705 /* Make sure hardware completes it */
1706 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1708 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1712 * Enable Queued Invalidation interface. This is a must to support
1713 * interrupt-remapping. Also used by DMA-remapping, which replaces
1714 * register based IOTLB invalidation.
1716 int dmar_enable_qi(struct intel_iommu *iommu)
1719 struct page *desc_page;
1721 if (!ecap_qis(iommu->ecap))
1725 * queued invalidation is already set up and enabled.
1730 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1737 * Need two pages to accommodate 256 descriptors of 256 bits each
1738 * if the remapping hardware supports scalable mode translation.
1740 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
1741 !!ecap_smts(iommu->ecap));
1748 qi->desc = page_address(desc_page);
1750 qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
1751 if (!qi->desc_status) {
1752 free_page((unsigned long) qi->desc);
1758 raw_spin_lock_init(&qi->q_lock);
1760 __dmar_enable_qi(iommu);
1765 /* iommu interrupt handling. Most of it is MSI-like. */
1773 static const char *dma_remap_fault_reasons[] =
1776 "Present bit in root entry is clear",
1777 "Present bit in context entry is clear",
1778 "Invalid context entry",
1779 "Access beyond MGAW",
1780 "PTE Write access is not set",
1781 "PTE Read access is not set",
1782 "Next page table ptr is invalid",
1783 "Root table address invalid",
1784 "Context table ptr is invalid",
1785 "non-zero reserved fields in RTP",
1786 "non-zero reserved fields in CTP",
1787 "non-zero reserved fields in PTE",
1788 "PCE for translation request specifies blocking",
1791 static const char * const dma_remap_sm_fault_reasons[] = {
1792 "SM: Invalid Root Table Address",
1793 "SM: TTM 0 for request with PASID",
1794 "SM: TTM 0 for page group request",
1795 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */
1796 "SM: Error attempting to access Root Entry",
1797 "SM: Present bit in Root Entry is clear",
1798 "SM: Non-zero reserved field set in Root Entry",
1799 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */
1800 "SM: Error attempting to access Context Entry",
1801 "SM: Present bit in Context Entry is clear",
1802 "SM: Non-zero reserved field set in the Context Entry",
1803 "SM: Invalid Context Entry",
1804 "SM: DTE field in Context Entry is clear",
1805 "SM: PASID Enable field in Context Entry is clear",
1806 "SM: PASID is larger than the max in Context Entry",
1807 "SM: PRE field in Context-Entry is clear",
1808 "SM: RID_PASID field error in Context-Entry",
1809 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */
1810 "SM: Error attempting to access the PASID Directory Entry",
1811 "SM: Present bit in Directory Entry is clear",
1812 "SM: Non-zero reserved field set in PASID Directory Entry",
1813 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */
1814 "SM: Error attempting to access PASID Table Entry",
1815 "SM: Present bit in PASID Table Entry is clear",
1816 "SM: Non-zero reserved field set in PASID Table Entry",
1817 "SM: Invalid Scalable-Mode PASID Table Entry",
1818 "SM: ERE field is clear in PASID Table Entry",
1819 "SM: SRE field is clear in PASID Table Entry",
1820 "Unknown", "Unknown",/* 0x5E-0x5F */
1821 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */
1822 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */
1823 "SM: Error attempting to access first-level paging entry",
1824 "SM: Present bit in first-level paging entry is clear",
1825 "SM: Non-zero reserved field set in first-level paging entry",
1826 "SM: Error attempting to access FL-PML4 entry",
1827 "SM: First-level entry address beyond MGAW in Nested translation",
1828 "SM: Read permission error in FL-PML4 entry in Nested translation",
1829 "SM: Read permission error in first-level paging entry in Nested translation",
1830 "SM: Write permission error in first-level paging entry in Nested translation",
1831 "SM: Error attempting to access second-level paging entry",
1832 "SM: Read/Write permission error in second-level paging entry",
1833 "SM: Non-zero reserved field set in second-level paging entry",
1834 "SM: Invalid second-level page table pointer",
1835 "SM: A/D bit update needed in second-level entry when set up in no snoop",
1836 "Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */
1837 "SM: Address in first-level translation is not canonical",
1838 "SM: U/S set 0 for first-level translation with user privilege",
1839 "SM: No execute permission for request with PASID and ER=1",
1840 "SM: Address beyond the DMA hardware max",
1841 "SM: Second-level entry address beyond the max",
1842 "SM: No write permission for Write/AtomicOp request",
1843 "SM: No read permission for Read/AtomicOp request",
1844 "SM: Invalid address-interrupt address",
1845 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */
1846 "SM: A/D bit update needed in first-level entry when set up in no snoop",
1849 static const char *irq_remap_fault_reasons[] =
1851 "Detected reserved fields in the decoded interrupt-remapped request",
1852 "Interrupt index exceeded the interrupt-remapping table size",
1853 "Present field in the IRTE entry is clear",
1854 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1855 "Detected reserved fields in the IRTE entry",
1856 "Blocked a compatibility format interrupt request",
1857 "Blocked an interrupt request due to source-id verification failure",
1860 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1862 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1863 ARRAY_SIZE(irq_remap_fault_reasons))) {
1864 *fault_type = INTR_REMAP;
1865 return irq_remap_fault_reasons[fault_reason - 0x20];
1866 } else if (fault_reason >= 0x30 && (fault_reason - 0x30 <
1867 ARRAY_SIZE(dma_remap_sm_fault_reasons))) {
1868 *fault_type = DMA_REMAP;
1869 return dma_remap_sm_fault_reasons[fault_reason - 0x30];
1870 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1871 *fault_type = DMA_REMAP;
1872 return dma_remap_fault_reasons[fault_reason];
1874 *fault_type = UNKNOWN;
1880 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1882 if (iommu->irq == irq)
1883 return DMAR_FECTL_REG;
1884 else if (iommu->pr_irq == irq)
1885 return DMAR_PECTL_REG;
1886 else if (iommu->perf_irq == irq)
1887 return DMAR_PERFINTRCTL_REG;
1892 void dmar_msi_unmask(struct irq_data *data)
1894 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1895 int reg = dmar_msi_reg(iommu, data->irq);
1899 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1900 writel(0, iommu->reg + reg);
1901 /* Read a reg to force flush the posted write */
1902 readl(iommu->reg + reg);
1903 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1906 void dmar_msi_mask(struct irq_data *data)
1908 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1909 int reg = dmar_msi_reg(iommu, data->irq);
1913 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1914 writel(DMA_FECTL_IM, iommu->reg + reg);
1915 /* Read a reg to force flush the posted write */
1916 readl(iommu->reg + reg);
1917 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1920 void dmar_msi_write(int irq, struct msi_msg *msg)
1922 struct intel_iommu *iommu = irq_get_handler_data(irq);
1923 int reg = dmar_msi_reg(iommu, irq);
1926 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1927 writel(msg->data, iommu->reg + reg + 4);
1928 writel(msg->address_lo, iommu->reg + reg + 8);
1929 writel(msg->address_hi, iommu->reg + reg + 12);
1930 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1933 void dmar_msi_read(int irq, struct msi_msg *msg)
1935 struct intel_iommu *iommu = irq_get_handler_data(irq);
1936 int reg = dmar_msi_reg(iommu, irq);
1939 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1940 msg->data = readl(iommu->reg + reg + 4);
1941 msg->address_lo = readl(iommu->reg + reg + 8);
1942 msg->address_hi = readl(iommu->reg + reg + 12);
1943 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1946 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1947 u8 fault_reason, u32 pasid, u16 source_id,
1948 unsigned long long addr)
1953 reason = dmar_get_fault_reason(fault_reason, &fault_type);
1955 if (fault_type == INTR_REMAP) {
1956 pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
1957 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1958 PCI_FUNC(source_id & 0xFF), addr >> 48,
1959 fault_reason, reason);
1964 if (pasid == INVALID_IOASID)
1965 pr_err("[%s NO_PASID] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
1966 type ? "DMA Read" : "DMA Write",
1967 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1968 PCI_FUNC(source_id & 0xFF), addr,
1969 fault_reason, reason);
1971 pr_err("[%s PASID 0x%x] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
1972 type ? "DMA Read" : "DMA Write", pasid,
1973 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1974 PCI_FUNC(source_id & 0xFF), addr,
1975 fault_reason, reason);
1977 dmar_fault_dump_ptes(iommu, source_id, addr, pasid);
1982 #define PRIMARY_FAULT_REG_LEN (16)
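/*
 * Each primary fault record is 16 bytes: the low quadword holds the
 * faulting page address, the dword at offset 8 carries the source-id and
 * the PASID-present flag, and the top dword at offset 12 holds the F bit,
 * the fault reason, the request type and the PASID value decoded below.
 */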
1983 irqreturn_t dmar_fault(int irq, void *dev_id)
1985 struct intel_iommu *iommu = dev_id;
1986 int reg, fault_index;
1989 static DEFINE_RATELIMIT_STATE(rs,
1990 DEFAULT_RATELIMIT_INTERVAL,
1991 DEFAULT_RATELIMIT_BURST);
1993 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1994 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1995 if (fault_status && __ratelimit(&rs))
1996 pr_err("DRHD: handling fault status reg %x\n", fault_status);
1998 /* TBD: ignore advanced fault log currently */
1999 if (!(fault_status & DMA_FSTS_PPF))
2002 fault_index = dma_fsts_fault_record_index(fault_status);
2003 reg = cap_fault_reg_offset(iommu->cap);
2005 /* Disable printing, simply clear the fault when ratelimited */
2006 bool ratelimited = !__ratelimit(&rs);
2015 /* highest 32 bits */
2016 data = readl(iommu->reg + reg +
2017 fault_index * PRIMARY_FAULT_REG_LEN + 12);
2018 if (!(data & DMA_FRCD_F))
2022 fault_reason = dma_frcd_fault_reason(data);
2023 type = dma_frcd_type(data);
2025 pasid = dma_frcd_pasid_value(data);
2026 data = readl(iommu->reg + reg +
2027 fault_index * PRIMARY_FAULT_REG_LEN + 8);
2028 source_id = dma_frcd_source_id(data);
2030 pasid_present = dma_frcd_pasid_present(data);
2031 guest_addr = dmar_readq(iommu->reg + reg +
2032 fault_index * PRIMARY_FAULT_REG_LEN);
2033 guest_addr = dma_frcd_page_addr(guest_addr);
2036 /* clear the fault */
2037 writel(DMA_FRCD_F, iommu->reg + reg +
2038 fault_index * PRIMARY_FAULT_REG_LEN + 12);
2040 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
2043 /* Use pasid -1 if the pasid is not present */
2044 dmar_fault_do_one(iommu, type, fault_reason,
2045 pasid_present ? pasid : INVALID_IOASID,
2046 source_id, guest_addr);
2049 if (fault_index >= cap_num_fault_regs(iommu->cap))
2051 raw_spin_lock_irqsave(&iommu->register_lock, flag);
2054 writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
2055 iommu->reg + DMAR_FSTS_REG);
2058 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
2062 int dmar_set_interrupt(struct intel_iommu *iommu)
2067 * Check if the fault interrupt is already initialized.
2072 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
2076 pr_err("No free IRQ vectors\n");
2080 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
2082 pr_err("Can't request irq\n");
2086 int __init enable_drhd_fault_handling(void)
2088 struct dmar_drhd_unit *drhd;
2089 struct intel_iommu *iommu;
2092 * Enable fault control interrupt.
2094 for_each_iommu(iommu, drhd) {
2096 int ret = dmar_set_interrupt(iommu);
2099 pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
2100 (unsigned long long)drhd->reg_base_addr, ret);
2105 * Clear any previous faults.
2107 dmar_fault(iommu->irq, iommu);
2108 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
2109 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
2116 * Re-enable Queued Invalidation interface.
2118 int dmar_reenable_qi(struct intel_iommu *iommu)
2120 if (!ecap_qis(iommu->ecap))
2127 * First disable queued invalidation.
2129 dmar_disable_qi(iommu);
2131 * Then enable queued invalidation again. Since there are no pending
2132 * invalidation requests now, it's safe to re-enable queued
2135 __dmar_enable_qi(iommu);
2141 * Check interrupt remapping support in DMAR table description.
2143 int __init dmar_ir_support(void)
2145 struct acpi_table_dmar *dmar;
2146 dmar = (struct acpi_table_dmar *)dmar_tbl;
2149 return dmar->flags & 0x1;
2152 /* Check whether DMAR units are in use */
2153 static inline bool dmar_in_use(void)
2155 return irq_remapping_enabled || intel_iommu_enabled;
2158 static int __init dmar_free_unused_resources(void)
2160 struct dmar_drhd_unit *dmaru, *dmaru_n;
2165 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
2166 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
2168 down_write(&dmar_global_lock);
2169 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
2170 list_del(&dmaru->list);
2171 dmar_free_drhd(dmaru);
2173 up_write(&dmar_global_lock);
2178 late_initcall(dmar_free_unused_resources);
2181 * DMAR Hotplug Support
2182 * For more details, please refer to Intel(R) Virtualization Technology
2183 * for Directed I/O Architecture Specification, Rev 2.2, Section 8.8
2184 * "Remapping Hardware Unit Hot Plug".
2186 static guid_t dmar_hp_guid =
2187 GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
2188 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
2191 * Currently there's only one revision and BIOS will not check the revision id,
2192 * so use 0 for safety.
2194 #define DMAR_DSM_REV_ID 0
2195 #define DMAR_DSM_FUNC_DRHD 1
2196 #define DMAR_DSM_FUNC_ATSR 2
2197 #define DMAR_DSM_FUNC_RHSA 3
2198 #define DMAR_DSM_FUNC_SATC 4
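/*
 * Each _DSM function returns a buffer holding DMAR structures of the
 * matching type (DRHD, ATSR, RHSA or SATC); dmar_walk_dsm_resource()
 * feeds that buffer through the same dmar_walk_remapping_entries()
 * parser used for the static ACPI DMAR table.
 */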
2200 static inline bool dmar_detect_dsm(acpi_handle handle, int func)
2202 return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
2205 static int dmar_walk_dsm_resource(acpi_handle handle, int func,
2206 dmar_res_handler_t handler, void *arg)
2209 union acpi_object *obj;
2210 struct acpi_dmar_header *start;
2211 struct dmar_res_callback callback;
2212 static int res_type[] = {
2213 [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
2214 [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
2215 [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
2216 [DMAR_DSM_FUNC_SATC] = ACPI_DMAR_TYPE_SATC,
2219 if (!dmar_detect_dsm(handle, func))
2222 obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
2223 func, NULL, ACPI_TYPE_BUFFER);
2227 memset(&callback, 0, sizeof(callback));
2228 callback.cb[res_type[func]] = handler;
2229 callback.arg[res_type[func]] = arg;
2230 start = (struct acpi_dmar_header *)obj->buffer.pointer;
2231 ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
2238 static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
2241 struct dmar_drhd_unit *dmaru;
2243 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2247 ret = dmar_ir_hotplug(dmaru, true);
2249 ret = dmar_iommu_hotplug(dmaru, true);
2254 static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
2258 struct dmar_drhd_unit *dmaru;
2260 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2265 * All PCI devices managed by this unit should have been destroyed.
2267 if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
2268 for_each_active_dev_scope(dmaru->devices,
2269 dmaru->devices_cnt, i, dev)
2273 ret = dmar_ir_hotplug(dmaru, false);
2275 ret = dmar_iommu_hotplug(dmaru, false);
2280 static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
2282 struct dmar_drhd_unit *dmaru;
2284 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2286 list_del_rcu(&dmaru->list);
2288 dmar_free_drhd(dmaru);
2294 static int dmar_hotplug_insert(acpi_handle handle)
2299 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2300 &dmar_validate_one_drhd, (void *)1);
2304 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2305 &dmar_parse_one_drhd, (void *)&drhd_count);
2306 if (ret == 0 && drhd_count == 0) {
2307 pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
2313 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
2314 &dmar_parse_one_rhsa, NULL);
2318 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2319 &dmar_parse_one_atsr, NULL);
2323 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2324 &dmar_hp_add_drhd, NULL);
2328 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2329 &dmar_hp_remove_drhd, NULL);
2331 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2332 &dmar_release_one_atsr, NULL);
2334 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2335 &dmar_hp_release_drhd, NULL);
2340 static int dmar_hotplug_remove(acpi_handle handle)
2344 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2345 &dmar_check_one_atsr, NULL);
2349 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2350 &dmar_hp_remove_drhd, NULL);
2352 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2353 &dmar_release_one_atsr, NULL));
2354 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2355 &dmar_hp_release_drhd, NULL));
2357 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2358 &dmar_hp_add_drhd, NULL);
2364 static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
2365 void *context, void **retval)
2367 acpi_handle *phdl = retval;
2369 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2371 return AE_CTRL_TERMINATE;
2377 static int dmar_device_hotplug(acpi_handle handle, bool insert)
2380 acpi_handle tmp = NULL;
2386 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2389 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
2391 dmar_get_dsm_handle,
2393 if (ACPI_FAILURE(status)) {
2394 pr_warn("Failed to locate _DSM method.\n");
2401 down_write(&dmar_global_lock);
2403 ret = dmar_hotplug_insert(tmp);
2405 ret = dmar_hotplug_remove(tmp);
2406 up_write(&dmar_global_lock);
2411 int dmar_device_add(acpi_handle handle)
2413 return dmar_device_hotplug(handle, true);
2416 int dmar_device_remove(acpi_handle handle)
2418 return dmar_device_hotplug(handle, false);
2422 * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
2424 * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
2425 * the ACPI DMAR table. This means that the platform boot firmware has made
2426 * sure no device can issue DMA outside of RMRR regions.
2428 bool dmar_platform_optin(void)
2430 struct acpi_table_dmar *dmar;
2434 status = acpi_get_table(ACPI_SIG_DMAR, 0,
2435 (struct acpi_table_header **)&dmar);
2436 if (ACPI_FAILURE(status))
2439 ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
2440 acpi_put_table((struct acpi_table_header *)dmar);
2444 EXPORT_SYMBOL_GPL(dmar_platform_optin);