/*
 * Copyright (C) 2016, Semihalf
 *	Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * This file implements early detection/parsing of I/O mapping
 * reported to OS through firmware via I/O Remapping Table (IORT)
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)

struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline
struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}

typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register domain token and related ITS ID
 * to the list from where we can get it back later on.
 * @trans_id: ITS ID.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token when found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}

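/*
 * Walk the IORT node array, invoking @callback on each node of the
 * requested @type until the callback reports a match (AE_OK).
 */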
static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}

static acpi_status
iort_match_type_callback(struct acpi_iort_node *node, void *context)
{
	return AE_OK;
}

bool iort_node_match(u8 type)
{
	struct acpi_iort_node *node;

	node = iort_scan_node(type, iort_match_type_callback, NULL);

	return node != NULL;
}

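/*
 * Match an IORT node against a device: named components are matched by
 * comparing the node's device object name with the device's full ACPI
 * path, root complexes by comparing PCI segment numbers.
 */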
static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
		struct acpi_iort_named_component *ncomp;

		if (!adev) {
			status = AE_NOT_FOUND;
			goto out;
		}

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * with root complexes. Each segment number can represent
		 * only one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	} else {
		status = AE_NOT_FOUND;
	}
out:
	return status;
}

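/*
 * Translate an input ID to an output ID through a single ID mapping
 * entry. Returns 0 on successful translation, -ENXIO if the mapping
 * entry does not apply to @rid_in.
 */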
static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out)
{
	/* Single mapping does not care for input id */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in >= map->input_base + map->id_count))
		return -ENXIO;

	*rid_out = map->output_base + (rid_in - map->input_base);
	return 0;
}

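/*
 * Retrieve the output ID and parent node for the @index-th single
 * mapping entry of @node, provided the parent type matches @type_mask.
 */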
static
struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					u32 *id_out, u8 type_mask,
					int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
				     index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset);

	/* Firmware bug! */
	if (!map[index].output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			      map[index].output_reference);

	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		return NULL;

	if (map[index].flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*id_out = map[index].output_base;
			return parent;
		}
	}

	return NULL;
}

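/*
 * Iteratively translate @rid_in through the ID mapping arrays, walking
 * up the IORT tree until a node whose type matches @type_mask is found.
 */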
static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
						u32 rid_in, u32 *rid_out,
						u8 type_mask)
{
	u32 rid = rid_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (rid_out)
				*rid_out = rid;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/* Do the RID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			if (!iort_id_map(map, node->type, rid, &rid))
				break;
		}

		if (i == node->mapping_count)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    map->output_reference);
	}

fail_map:
	/* Map input RID to output RID unchanged on mapping failure */
	if (rid_out)
		*rid_out = rid_in;

	return NULL;
}

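/*
 * Find the IORT node describing @dev: a named component node for
 * platform devices, the root complex node of the device's PCI segment
 * for PCI devices.
 */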
static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev))
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);

	/* Find a PCI root bus */
	pbus = to_pci_dev(dev)->bus;
	while (!pci_is_root_bus(pbus))
		pbus = pbus->parent;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_rid() - Map a MSI requester ID for a device
 * @dev: The device for which the mapping is to be done.
 * @req_id: The device requester ID.
 *
 * Returns: mapped MSI RID on success, input requester ID otherwise
 */
u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return req_id;

	iort_node_map_rid(node, req_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @req_id: The device requester ID.
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 req_id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_rid(node, req_id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @req_id: Requester ID for the device.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}

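/*
 * Register a GSI with the requested trigger type and describe the
 * resulting Linux IRQ as an IORESOURCE_IRQ entry in @res.
 */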
static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
								      name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

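/*
 * One memory resource is always present; each of the optional SMMUv3
 * interrupts (eventq, priq, gerror, cmdq-sync) adds an IRQ resource.
 */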
static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + SZ_128K - 1;
	res[num_res].flags = IORESOURCE_MEM;

	num_res++;

	if (smmu->event_gsiv)
		acpi_iort_register_irq(smmu->event_gsiv, "eventq",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);

	if (smmu->pri_gsiv)
		acpi_iort_register_irq(smmu->pri_gsiv, "priq",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);

	if (smmu->gerr_gsiv)
		acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);

	if (smmu->sync_gsiv)
		acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);
}

static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}

static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	/* Global IRQs */
	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
			       &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}

static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
}

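/*
 * Per-IOMMU-type operations used to probe an IORT node and turn it
 * into a platform device.
 */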
struct iort_iommu_config {
	const char *name;
	int (*iommu_init)(struct acpi_iort_node *node);
	bool (*iommu_is_coherent)(struct acpi_iort_node *node);
	int (*iommu_count_resources)(struct acpi_iort_node *node);
	void (*iommu_init_resources)(struct resource *res,
				     struct acpi_iort_node *node);
};

static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.iommu_is_coherent = arm_smmu_v3_is_coherent,
	.iommu_count_resources = arm_smmu_v3_count_resources,
	.iommu_init_resources = arm_smmu_v3_init_resources
};

static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.iommu_is_coherent = arm_smmu_is_coherent,
	.iommu_count_resources = arm_smmu_count_resources,
	.iommu_init_resources = arm_smmu_init_resources
};

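/* Select the per-type configuration matching an SMMU IORT node */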
static __init
const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	default:
		return NULL;
	}
}

/**
 * iort_add_smmu_platform_device() - Allocate a platform device for SMMU
 * @node: Pointer to SMMU ACPI IORT node
 *
 * Returns: 0 on success, <0 failure
 */
static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	enum dev_dma_attr attr;
	int ret, count;
	const struct iort_iommu_config *ops = iort_get_iommu_cfg(node);

	if (!ops)
		return -ENODEV;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	count = ops->iommu_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->iommu_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Add a copy of IORT node pointer to platform_data to
	 * be used to retrieve IORT data information.
	 */
	ret = platform_device_add_data(pdev, &node, sizeof(node));
	if (ret)
		goto dev_put;

	/*
	 * We expect the dma masks to be equivalent for
	 * all SMMUs set-ups
	 */
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	fwnode = iort_get_fwnode(node);

	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	attr = ops->iommu_is_coherent(node) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(&pdev->dev, attr);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	acpi_dma_deconfigure(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}

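/*
 * Scan the IORT table and create a platform device, with an attached
 * static fwnode handle, for every SMMU and SMMUv3 node found.
 */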
static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;

	/*
	 * iort_table and iort both point to the start of the IORT table, but
	 * have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
		    (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {

			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_smmu_platform_device(iort_node);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

void __init acpi_iort_init(void)
{
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_init_platform_devices();

	acpi_probe_device_table(iort);
}