/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)

#define LPI_PROP_DEFAULT_PRIO	0xa0
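
/*
 * Example: with lpi_id_bits = 16, LPI_PROPBASE_SZ comes to 64kB (one
 * config byte per ID) and the 8kB of pending bits round up to a
 * single 64kB-aligned PENDBASE allocation.
 */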
/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct list_head	entry;
	void __iomem		*base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct list_head	its_device_list;
	u64			flags;
	u32			ite_size;
	u32			device_ids;
	int			numa_node;
	bool			is_v4;
};

#define ITS_ITT_ALIGN		SZ_256

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
};

/*
 * The ITS view of a device - belongs to an ITS, a collection, owns an
 * interrupt translation table, and a list of interrupts.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
};

static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

/*
 * We have a maximum number of 16 ITSs in the whole system if we're
 * using the ITSList mechanism
 */
#define ITS_LIST_MAX		16

static unsigned long its_list_map;

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	u64	raw_cmd[4];
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
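
/* A command is four u64s (32 bytes), so the 64kB queue holds 2048 entries. */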

typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
						    struct its_cmd_desc *);

static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);

	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}
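
/*
 * Each helper below packs one command field into the right bit range
 * of one of the four doublewords making up a command block.
 */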

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}
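
/*
 * The classic ring-buffer discipline: one slot is kept free so that a
 * full queue (write + 1 == read) can be told apart from an empty one
 * (write == read).
 */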

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

static void its_wait_for_range_completion(struct its_node *its,
					  struct its_cmd_block *from,
					  struct its_cmd_block *to)
{
	u64 rd_idx, from_idx, to_idx;
	u32 count = 1000000;	/* 1s! */

	from_idx = its_cmd_ptr_to_offset(its, from);
	to_idx = its_cmd_ptr_to_offset(its, to);

	while (1) {
		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/* Direct case */
		if (from_idx < to_idx && rd_idx >= to_idx)
			break;

		/* Wrapped case */
		if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}

/* Warning, macro hell follows */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screwed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(cmd, desc);					\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(sync_cmd, sync_obj);				\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	its_wait_for_range_completion(its, cmd, next_cmd);		\
}

static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)
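
/*
 * The instantiation above expands to its_send_single_command(): build
 * the command via the supplied builder, chase it with a SYNC aimed at
 * the collection the builder returned (if any), publish both through
 * GITS_CWRITER, and wait for GITS_CREADR to move past them.
 */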

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static void lpi_set_config(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = d->hwirq;
	u32 id = its_get_event_id(d);
	u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;

	if (enable)
		*cfg |= LPI_PROP_ENABLED;
	else
		*cfg &= ~LPI_PROP_ENABLED;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
	its_send_inv(its_dev, id);
}
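
/*
 * Note that lpi_set_config() indexes the property table with
 * (hwirq - 8192): LPI IDs start at 8192, right after the IDs
 * reserved for SGIs/PPIs/SPIs.
 */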

static void its_mask_irq(struct irq_data *d)
{
	lpi_set_config(d, false);
}

static void its_unmask_irq(struct irq_data *d)
{
	lpi_set_config(d, true);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* lpi cannot be routed to a redistributor that is on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* don't set the affinity when the target cpu is same as current one */
	if (cpu != its_dev->event_map.col_map[id]) {
		target_col = &its_dev->its->collections[cpu];
		its_send_movi(its_dev, target_col, id);
		its_dev->event_map.col_map[id] = cpu;
	}

	return IRQ_SET_MASK_OK_DONE;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->phys_base + GITS_TRANSLATER;

	msg->address_lo		= lower_32_bits(addr);
	msg->address_hi		= upper_32_bits(addr);
	msg->data		= its_get_event_id(d);

	iommu_dma_map_msi_msg(d->irq, msg);
}

static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
};

/*
 * How we allocate LPIs:
 *
 * The GIC has id_bits bits for interrupt identifiers. From there, we
 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
 * bits (LPIs per chunk, that is).
 *
 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
 */
#define IRQS_PER_CHUNK_SHIFT	5
#define IRQS_PER_CHUNK		(1 << IRQS_PER_CHUNK_SHIFT)
#define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */
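
/* e.g. id_bits = 16: (65536 - 8192) >> 5 = 1792 allocatable chunks. */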

static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
static DEFINE_SPINLOCK(lpi_lock);

static int its_lpi_to_chunk(int lpi)
{
	return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
}

static int its_chunk_to_lpi(int chunk)
{
	return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}
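
/* Round trip example: LPI 8224 -> chunk 1 -> back to chunk base LPI 8224. */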

static int __init its_lpi_init(u32 id_bits)
{
	lpi_chunks = its_lpi_to_chunk(1UL << id_bits);

	lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
			     GFP_KERNEL);
	if (!lpi_bitmap) {
		lpi_chunks = 0;
		return -ENOMEM;
	}

	pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
	return 0;
}

static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
{
	unsigned long *bitmap = NULL;
	int chunk_id;
	int nr_chunks;
	int i;

	nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);

	spin_lock(&lpi_lock);

	do {
		chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
						      0, nr_chunks, 0);
		if (chunk_id < lpi_chunks)
			break;

		nr_chunks--;
	} while (nr_chunks > 0);

	if (!nr_chunks)
		goto out;

	bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof(long),
			 GFP_ATOMIC);
	if (!bitmap)
		goto out;

	for (i = 0; i < nr_chunks; i++)
		set_bit(chunk_id + i, lpi_bitmap);

	*base = its_chunk_to_lpi(chunk_id);
	*nr_ids = nr_chunks * IRQS_PER_CHUNK;

out:
	spin_unlock(&lpi_lock);

	if (!bitmap)
		*base = *nr_ids = 0;

	return bitmap;
}

static void its_lpi_free(struct event_lpi_map *map)
{
	int base = map->lpi_base;
	int nr_ids = map->nr_lpis;
	int lpi;

	spin_lock(&lpi_lock);

	for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
		int chunk = its_lpi_to_chunk(lpi);
		BUG_ON(chunk > lpi_chunks);
		if (test_bit(chunk, lpi_bitmap)) {
			clear_bit(chunk, lpi_bitmap);
		} else {
			pr_err("Bad LPI chunk %d\n", chunk);
		}
	}

	spin_unlock(&lpi_lock);

	kfree(map->lpi_map);
	kfree(map->col_map);
}

static int __init its_alloc_lpi_tables(void)
{
	phys_addr_t paddr;

	lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
	gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
					    get_order(LPI_PROPBASE_SZ));
	if (!gic_rdists->prop_page) {
		pr_err("Failed to allocate PROPBASE\n");
		return -ENOMEM;
	}

	paddr = page_to_phys(gic_rdists->prop_page);
	pr_info("GIC: using LPI property table @%pa\n", &paddr);

	/* Priority 0xa0, Group-1, disabled */
	memset(page_address(gic_rdists->prop_page),
	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
	       LPI_PROPBASE_SZ);

	/* Make sure the GIC will observe the written configuration */
	gic_flush_dcache_to_poc(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);

	return its_lpi_init(lpi_id_bits);
}

static const char *its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE]	= "Devices",
	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
	[GITS_BASER_TYPE_RESERVED3]	= "Reserved (3)",
	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};

static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
	u32 idx = baser - its->tables;

	return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}

static void its_write_baser(struct its_node *its, struct its_baser *baser,
			    u64 val)
{
	u32 idx = baser - its->tables;

	gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
	baser->val = its_read_baser(its, baser);
}

static int its_setup_baser(struct its_node *its, struct its_baser *baser,
			   u64 cache, u64 shr, u32 psz, u32 order,
			   bool indirect)
{
	u64 val = its_read_baser(its, baser);
	u64 esz = GITS_BASER_ENTRY_SIZE(val);
	u64 type = GITS_BASER_TYPE(val);
	u32 alloc_pages;
	void *base;
	u64 tmp;

retry_alloc_baser:
	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
	if (alloc_pages > GITS_BASER_PAGES_MAX) {
		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
			&its->phys_base, its_base_type_string[type],
			alloc_pages, GITS_BASER_PAGES_MAX);
		alloc_pages = GITS_BASER_PAGES_MAX;
		order = get_order(GITS_BASER_PAGES_MAX * psz);
	}

	base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!base)
		return -ENOMEM;

retry_baser:
	val = (virt_to_phys(base)				 |
		(type << GITS_BASER_TYPE_SHIFT)			 |
		((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)	 |
		((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)	 |
		cache						 |
		shr						 |
		GITS_BASER_VALID);

	val |=	indirect ? GITS_BASER_INDIRECT : 0x0;

	switch (psz) {
	case SZ_4K:
		val |= GITS_BASER_PAGE_SIZE_4K;
		break;
	case SZ_16K:
		val |= GITS_BASER_PAGE_SIZE_16K;
		break;
	case SZ_64K:
		val |= GITS_BASER_PAGE_SIZE_64K;
		break;
	}

	its_write_baser(its, baser, val);
	tmp = baser->val;

	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
		/*
		 * Shareability didn't stick. Just use
		 * whatever the read reported, which is likely
		 * to be the only thing this redistributor
		 * supports. If that's zero, make it
		 * non-cacheable as well.
		 */
		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
		if (!shr) {
			cache = GITS_BASER_nC;
			gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
		}
		goto retry_baser;
	}

	if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
		/*
		 * Page size didn't stick. Let's try a smaller
		 * size and retry. If we reach 4K, then
		 * something is horribly wrong...
		 */
		free_pages((unsigned long)base, order);
		baser->base = NULL;

		switch (psz) {
		case SZ_16K:
			psz = SZ_4K;
			goto retry_alloc_baser;
		case SZ_64K:
			psz = SZ_16K;
			goto retry_alloc_baser;
		}
	}

	if (val != tmp) {
		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
		       &its->phys_base, its_base_type_string[type],
		       val, tmp);
		free_pages((unsigned long)base, order);
		return -ENXIO;
	}

	baser->order = order;
	baser->base = base;
	baser->psz = psz;
	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;

	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
		its_base_type_string[type],
		(unsigned long)virt_to_phys(base),
		indirect ? "indirect" : "flat", (int)esz,
		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);

	return 0;
}

static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser,
				   u32 psz, u32 *order)
{
	u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
	u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
	u32 ids = its->device_ids;
	u32 new_order = *order;
	bool indirect = false;

	/* No need to enable Indirection if memory requirement < (psz*2)bytes */
	if ((esz << ids) > (psz * 2)) {
		/*
		 * Find out whether hw supports a single or two-level table by
		 * reading bit at offset '62' after writing '1' to it.
		 */
		its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
		indirect = !!(baser->val & GITS_BASER_INDIRECT);

		if (indirect) {
			/*
			 * The size of the lvl2 table is equal to the ITS
			 * page size, which is 'psz'. For computing the lvl1
			 * table size, subtract the ID bits that a lvl2 table
			 * covers from 'ids' (as reported by the ITS
			 * hardware), and multiply by the lvl1 entry size.
			 */
			ids -= ilog2(psz / (int)esz);
			esz = GITS_LVL1_ENTRY_SIZE;
		}
	}

	/*
	 * Allocate as many entries as required to fit the
	 * range of device IDs that the ITS can grok... The ID
	 * space being incredibly sparse, this results in a
	 * massive waste of memory if two-level device table
	 * feature is not supported by hardware.
	 */
	new_order = max_t(u32, get_order(esz << ids), new_order);
	if (new_order >= MAX_ORDER) {
		new_order = MAX_ORDER - 1;
		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
		pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
			&its->phys_base, its->device_ids, ids);
	}

	*order = new_order;

	return indirect;
}
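
/*
 * Worked example (illustrative values): with a 64kB ITS page and
 * 8-byte entries, one lvl2 page covers 8192 device IDs (13 ID bits);
 * a 20-bit ID space then only needs a lvl1 table of 2^7 entries.
 */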

static void its_free_tables(struct its_node *its)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (its->tables[i].base) {
			free_pages((unsigned long)its->tables[i].base,
				   its->tables[i].order);
			its->tables[i].base = NULL;
		}
	}
}

static int its_alloc_tables(struct its_node *its)
{
	u64 typer = gic_read_typer(its->base + GITS_TYPER);
	u32 ids = GITS_TYPER_DEVBITS(typer);
	u64 shr = GITS_BASER_InnerShareable;
	u64 cache = GITS_BASER_RaWaWb;
	u32 psz = SZ_64K;
	int err, i;

	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
		/*
		 * erratum 22375: only alloc 8MB table size
		 * erratum 24313: ignore memory access type
		 */
		cache	= GITS_BASER_nCnB;
		ids	= 0x14;			/* 20 bits, 8MB */
	}

	its->device_ids = ids;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		struct its_baser *baser = its->tables + i;
		u64 val = its_read_baser(its, baser);
		u64 type = GITS_BASER_TYPE(val);
		u32 order = get_order(psz);
		bool indirect = false;

		if (type == GITS_BASER_TYPE_NONE)
			continue;

		if (type == GITS_BASER_TYPE_DEVICE)
			indirect = its_parse_baser_device(its, baser, psz, &order);

		err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
		if (err < 0) {
			its_free_tables(its);
			return err;
		}

		/* Update settings which will be used for next BASERn */
		psz = baser->psz;
		cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
		shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
	}

	return 0;
}

static int its_alloc_collections(struct its_node *its)
{
	its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
				   GFP_KERNEL);
	if (!its->collections)
		return -ENOMEM;

	return 0;
}

static void its_cpu_init_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	struct page *pend_page;
	u64 val, tmp;

	/* If we didn't allocate the pending table yet, do it now */
	pend_page = gic_data_rdist()->pend_page;
	if (!pend_page) {
		phys_addr_t paddr;
		/*
		 * The pending pages have to be at least 64kB aligned,
		 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
		 */
		pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
					get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
		if (!pend_page) {
			pr_err("Failed to allocate PENDBASE for CPU%d\n",
			       smp_processor_id());
			return;
		}

		/* Make sure the GIC will observe the zero-ed page */
		gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);

		paddr = page_to_phys(pend_page);
		pr_info("CPU%d: using LPI pending table @%pa\n",
			smp_processor_id(), &paddr);
		gic_data_rdist()->pend_page = pend_page;
	}

	/* Disable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/*
	 * Make sure any change to the table is observable by the GIC.
	 */
	dsb(sy);

	/* set PROPBASE */
	val = (page_to_phys(gic_rdists->prop_page) |
	       GICR_PROPBASER_InnerShareable |
	       GICR_PROPBASER_RaWaWb |
	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));

	gicr_write_propbaser(val, rbase + GICR_PROPBASER);
	tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);

	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
				 GICR_PROPBASER_CACHEABILITY_MASK);
			val |= GICR_PROPBASER_nC;
			gicr_write_propbaser(val, rbase + GICR_PROPBASER);
		}
		pr_info_once("GIC: using cache flushing for LPI property table\n");
		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
	}

	/* set PENDBASE */
	val = (page_to_phys(pend_page) |
	       GICR_PENDBASER_InnerShareable |
	       GICR_PENDBASER_RaWaWb);

	gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
	tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);

	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
		/*
		 * The HW reports non-shareable, we must remove the
		 * cacheability attributes as well.
		 */
		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
			 GICR_PENDBASER_CACHEABILITY_MASK);
		val |= GICR_PENDBASER_nC;
		gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
	}

	/* Enable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val |= GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure the GIC has seen the above */
	dsb(sy);
}

static void its_cpu_init_collection(void)
{
	struct its_node *its;
	int cpu;

	spin_lock(&its_lock);
	cpu = smp_processor_id();

	list_for_each_entry(its, &its_nodes, entry) {
		u64 target;

		/* avoid cross node collections and its mapping */
		if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
			struct device_node *cpu_node;

			cpu_node = of_get_cpu_node(cpu, NULL);
			if (its->numa_node != NUMA_NO_NODE &&
				its->numa_node != of_node_to_nid(cpu_node))
				continue;
		}

		/*
		 * We now have to bind each collection to its target
		 * redistributor.
		 */
		if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
			/*
			 * This ITS wants the physical address of the
			 * redistributor.
			 */
			target = gic_data_rdist()->phys_base;
		} else {
			/*
			 * This ITS wants a linear CPU number.
			 */
			target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
			target = GICR_TYPER_CPU_NUMBER(target) << 16;
		}

		/* Perform collection mapping */
		its->collections[cpu].target_address = target;
		its->collections[cpu].col_id = cpu;

		its_send_mapc(its, &its->collections[cpu], 1);
		its_send_invall(its, &its->collections[cpu]);
	}

	spin_unlock(&its_lock);
}

static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
	struct its_device *its_dev = NULL, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	list_for_each_entry(tmp, &its->its_device_list, entry) {
		if (tmp->device_id == dev_id) {
			its_dev = tmp;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&its->lock, flags);

	return its_dev;
}

static struct its_baser *its_get_baser(struct its_node *its, u32 type)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (GITS_BASER_TYPE(its->tables[i].val) == type)
			return &its->tables[i];
	}

	return NULL;
}

static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
{
	struct its_baser *baser;
	struct page *page;
	u32 esz, idx;
	__le64 *table;

	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);

	/* Don't allow device id that exceeds ITS hardware limit */
	if (!baser)
		return (ilog2(dev_id) < its->device_ids);

	/* Don't allow device id that exceeds single, flat table limit */
	esz = GITS_BASER_ENTRY_SIZE(baser->val);
	if (!(baser->val & GITS_BASER_INDIRECT))
		return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));

	/* Compute 1st level table index & check if that exceeds table limit */
	idx = dev_id >> ilog2(baser->psz / esz);
	if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
		return false;

	table = baser->base;

	/* Allocate memory for 2nd level table */
	if (!table[idx]) {
		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
		if (!page)
			return false;

		/* Flush Lvl2 table to PoC if hw doesn't support coherency */
		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
			gic_flush_dcache_to_poc(page_address(page), baser->psz);

		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);

		/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
			gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);

		/* Ensure updated table contents are visible to ITS hardware */
		dsb(sy);
	}

	return true;
}
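
/*
 * Example walk (hypothetical dev_id): with a 64kB lvl2 page and
 * 8-byte entries, dev_id 0x10421 selects lvl1 slot 0x10421 >> 13 = 8,
 * and the low 13 bits index into the lazily allocated lvl2 page.
 */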

static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
					    int nvecs)
{
	struct its_device *dev;
	unsigned long *lpi_map;
	unsigned long flags;
	u16 *col_map = NULL;
	void *itt;
	int lpi_base;
	int nr_lpis;
	int nr_ites;
	int sz;

	if (!its_alloc_device_table(its, dev_id))
		return NULL;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	/*
	 * At least one bit of EventID is being used, hence a minimum
	 * of two entries. No, the architecture doesn't let you
	 * express an ITT with a single entry.
	 */
	nr_ites = max(2UL, roundup_pow_of_two(nvecs));
	sz = nr_ites * its->ite_size;
	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
	itt = kzalloc(sz, GFP_KERNEL);
	lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
	if (lpi_map)
		col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);

	if (!dev || !itt || !lpi_map || !col_map) {
		kfree(dev);
		kfree(itt);
		kfree(lpi_map);
		kfree(col_map);
		return NULL;
	}

	gic_flush_dcache_to_poc(itt, sz);

	dev->its = its;
	dev->itt = itt;
	dev->nr_ites = nr_ites;
	dev->event_map.lpi_map = lpi_map;
	dev->event_map.col_map = col_map;
	dev->event_map.lpi_base = lpi_base;
	dev->event_map.nr_lpis = nr_lpis;
	dev->device_id = dev_id;
	INIT_LIST_HEAD(&dev->entry);

	raw_spin_lock_irqsave(&its->lock, flags);
	list_add(&dev->entry, &its->its_device_list);
	raw_spin_unlock_irqrestore(&its->lock, flags);

	/* Map device to its ITT */
	its_send_mapd(dev, 1);

	return dev;
}

static void its_free_device(struct its_device *its_dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
	list_del(&its_dev->entry);
	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
	kfree(its_dev->itt);
	kfree(its_dev);
}

static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
{
	int idx;

	idx = find_first_zero_bit(dev->event_map.lpi_map,
				  dev->event_map.nr_lpis);
	if (idx == dev->event_map.nr_lpis)
		return -ENOSPC;

	*hwirq = dev->event_map.lpi_base + idx;
	set_bit(idx, dev->event_map.lpi_map);

	return 0;
}

static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *info)
{
	struct its_node *its;
	struct its_device *its_dev;
	struct msi_domain_info *msi_info;
	u32 dev_id;

	/*
	 * We ignore "dev" entirely, and rely on the dev_id that has
	 * been passed via the scratchpad. This limits this domain's
	 * usefulness to upper layers that definitely know that they
	 * are built on top of the ITS.
	 */
	dev_id = info->scratchpad[0].ul;

	msi_info = msi_get_domain_info(domain);
	its = msi_info->data;

	its_dev = its_find_device(its, dev_id);
	if (its_dev) {
		/*
		 * We already have seen this ID, probably through
		 * another alias (PCI bridge of some sort). No need to
		 * create the device.
		 */
		pr_debug("Reusing ITT for devID %x\n", dev_id);
		goto out;
	}

	its_dev = its_create_device(its, dev_id, nvec);
	if (!its_dev)
		return -ENOMEM;

	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
out:
	info->scratchpad[0].ptr = its_dev;
	return 0;
}

static struct msi_domain_ops its_msi_domain_ops = {
	.msi_prepare	= its_msi_prepare,
};

static int its_irq_gic_domain_alloc(struct irq_domain *domain,
				    unsigned int virq,
				    irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;

	if (irq_domain_get_of_node(domain->parent)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = GIC_IRQ_TYPE_LPI;
		fwspec.param[1] = hwirq;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 2;
		fwspec.param[0] = hwirq;
		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
}

static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t *info = args;
	struct its_device *its_dev = info->scratchpad[0].ptr;
	irq_hw_number_t hwirq;
	int err;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		err = its_alloc_device_irq(its_dev, &hwirq);
		if (err)
			return err;

		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
		if (err)
			return err;

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq, &its_irq_chip, its_dev);
		pr_debug("ID:%d pID:%d vID:%d\n",
			 (int)(hwirq - its_dev->event_map.lpi_base),
			 (int)hwirq, virq + i);
	}

	return 0;
}

static void its_irq_domain_activate(struct irq_domain *domain,
				    struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	const struct cpumask *cpu_mask = cpu_online_mask;

	/* get the cpu_mask of local node */
	if (its_dev->its->numa_node >= 0)
		cpu_mask = cpumask_of_node(its_dev->its->numa_node);

	/* Bind the LPI to the first possible CPU */
	its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);

	/* Map the GIC IRQ and event to the device */
	its_send_mapti(its_dev, d->hwirq, event);
}

static void its_irq_domain_deactivate(struct irq_domain *domain,
				      struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	/* Stop the delivery of interrupts */
	its_send_discard(its_dev, event);
}

static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_domain_get_irq_data(domain,
								virq + i);
		u32 event = its_get_event_id(data);

		/* Mark interrupt index as unused */
		clear_bit(event, its_dev->event_map.lpi_map);

		/* Nuke the entry in the domain */
		irq_domain_reset_irq_data(data);
	}

	/* If all interrupts have been freed, start mopping the floor */
	if (bitmap_empty(its_dev->event_map.lpi_map,
			 its_dev->event_map.nr_lpis)) {
		its_lpi_free(&its_dev->event_map);

		/* Unmap device/itt */
		its_send_mapd(its_dev, 0);
		its_free_device(its_dev);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops its_domain_ops = {
	.alloc			= its_irq_domain_alloc,
	.free			= its_irq_domain_free,
	.activate		= its_irq_domain_activate,
	.deactivate		= its_irq_domain_deactivate,
};

static int its_force_quiescent(void __iomem *base)
{
	u32 count = 1000000;	/* 1s */
	u32 val;

	val = readl_relaxed(base + GITS_CTLR);
	/*
	 * GIC architecture specification requires the ITS to be both
	 * disabled and quiescent for writes to GITS_BASER<n> or
	 * GITS_CBASER to not have UNPREDICTABLE results.
	 */
	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
		return 0;

	/* Disable the generation of all interrupts to this ITS */
	val &= ~GITS_CTLR_ENABLE;
	writel_relaxed(val, base + GITS_CTLR);

	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
	while (1) {
		val = readl_relaxed(base + GITS_CTLR);
		if (val & GITS_CTLR_QUIESCENT)
			return 0;

		count--;
		if (!count)
			return -EBUSY;

		cpu_relax();
		udelay(1);
	}
}

static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
}

static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
}

static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
{
	struct its_node *its = data;

	/* On QDF2400, the size of the ITE is 16Bytes */
	its->ite_size = 16;
}

static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
	{
		.desc	= "ITS: Cavium errata 22375, 24313",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_22375,
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23144
	{
		.desc	= "ITS: Cavium erratum 23144",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_23144,
	},
#endif
#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
	{
		.desc	= "ITS: QDF2400 erratum 0065",
		.iidr	= 0x00001070, 	/* QDF2400 ITS rev 1.x */
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_qdf2400_e0065,
	},
#endif
	{
	}
};

static void its_enable_quirks(struct its_node *its)
{
	u32 iidr = readl_relaxed(its->base + GITS_IIDR);

	gic_enable_quirks(iidr, its_quirks, its);
}

static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
{
	struct irq_domain *inner_domain;
	struct msi_domain_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
	if (!inner_domain) {
		kfree(info);
		return -ENOMEM;
	}

	inner_domain->parent = its_parent;
	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
	inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
	info->ops = &its_msi_domain_ops;
	info->data = its;
	inner_domain->host_data = info;

	return 0;
}

static int __init its_compute_its_list_map(struct resource *res,
					   void __iomem *its_base)
{
	int its_number;
	u32 ctlr;

	/*
	 * This is assumed to be done early enough that we're
	 * guaranteed to be single-threaded, hence no
	 * locking. Should this change, we should address
	 * that.
	 */
	its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX);
	if (its_number >= ITS_LIST_MAX) {
		pr_err("ITS@%pa: No ITSList entry available!\n",
		       &res->start);
		return -EINVAL;
	}

	ctlr = readl_relaxed(its_base + GITS_CTLR);
	ctlr &= ~GITS_CTLR_ITS_NUMBER;
	ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
	writel_relaxed(ctlr, its_base + GITS_CTLR);
	ctlr = readl_relaxed(its_base + GITS_CTLR);
	if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
		its_number = ctlr & GITS_CTLR_ITS_NUMBER;
		its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
	}

	if (test_and_set_bit(its_number, &its_list_map)) {
		pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
		       &res->start, its_number);
		return -EINVAL;
	}

	return its_number;
}

static int __init its_probe_one(struct resource *res,
				struct fwnode_handle *handle, int numa_node)
{
	struct its_node *its;
	void __iomem *its_base;
	u32 val, ctlr;
	u64 baser, tmp, typer;
	int err;

	its_base = ioremap(res->start, resource_size(res));
	if (!its_base) {
		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
		return -ENOMEM;
	}

	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
		err = -ENODEV;
		goto out_unmap;
	}

	err = its_force_quiescent(its_base);
	if (err) {
		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
		goto out_unmap;
	}

	pr_info("ITS %pR\n", res);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	typer = gic_read_typer(its_base + GITS_TYPER);
	its->base = its_base;
	its->phys_base = res->start;
	its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
	its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
	if (its->is_v4) {
		if (!(typer & GITS_TYPER_VMOVP)) {
			err = its_compute_its_list_map(res, its_base);
			if (err < 0)
				goto out_free_its;

			pr_info("ITS@%pa: Using ITS number %d\n",
				&res->start, err);
		} else {
			pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
		}
	}

	its->numa_node = numa_node;

	its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 get_order(ITS_CMD_QUEUE_SZ));
	if (!its->cmd_base) {
		err = -ENOMEM;
		goto out_free_its;
	}
	its->cmd_write = its->cmd_base;

	its_enable_quirks(its);

	err = its_alloc_tables(its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_RaWaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	gits_write_cbaser(baser, its->base + GITS_CBASER);
	tmp = gits_read_cbaser(its->base + GITS_CBASER);

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			gits_write_cbaser(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	gits_write_cwriter(0, its->base + GITS_CWRITER);
	ctlr = readl_relaxed(its->base + GITS_CTLR);
	writel_relaxed(ctlr | GITS_CTLR_ENABLE, its->base + GITS_CTLR);

	err = its_init_domain(handle, its);
	if (err)
		goto out_free_tables;

	spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
	return err;
}

static bool gic_rdists_supports_plpis(void)
{
	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}

int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		if (!gic_rdists_supports_plpis()) {
			pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
			return -ENXIO;
		}
		its_cpu_init_lpis();
		its_cpu_init_collection();
	}

	return 0;
}

static const struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};

static int __init its_of_probe(struct device_node *node)
{
	struct device_node *np;
	struct resource res;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
				np);
			continue;
		}

		if (of_address_to_resource(np, 0, &res)) {
			pr_warn("%pOF: no regs?\n", np);
			continue;
		}

		its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
	}
	return 0;
}

#ifdef CONFIG_ACPI

#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)

#if defined(CONFIG_ACPI_NUMA) && (ACPI_CA_VERSION >= 0x20170531)
struct its_srat_map {
	/* numa node id */
	u32	numa_node;
	/* GIC ITS ID */
	u32	its_id;
};

static struct its_srat_map its_srat_maps[MAX_NUMNODES] __initdata;
static int its_in_srat __initdata;

static int __init acpi_get_its_numa_node(u32 its_id)
{
	int i;

	for (i = 0; i < its_in_srat; i++) {
		if (its_id == its_srat_maps[i].its_id)
			return its_srat_maps[i].numa_node;
	}
	return NUMA_NO_NODE;
}

static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	int node;
	struct acpi_srat_gic_its_affinity *its_affinity;

	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
	if (!its_affinity)
		return -EINVAL;

	if (its_affinity->header.length < sizeof(*its_affinity)) {
		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
			its_affinity->header.length);
		return -EINVAL;
	}

	if (its_in_srat >= MAX_NUMNODES) {
		pr_err("SRAT: ITS affinity exceeding max count[%d]\n",
				MAX_NUMNODES);
		return -EINVAL;
	}

	node = acpi_map_pxm_to_node(its_affinity->proximity_domain);

	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
		return 0;
	}

	its_srat_maps[its_in_srat].numa_node = node;
	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
	its_in_srat++;
	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
		its_affinity->proximity_domain, its_affinity->its_id, node);

	return 0;
}

static void __init acpi_table_parse_srat_its(void)
{
	acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_parse_srat_its, 0);
}
#else
static void __init acpi_table_parse_srat_its(void)	{ }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
#endif

static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	err = its_probe_one(&res, dom_handle,
			acpi_get_its_numa_node(its_entry->translation_id));
	if (!err)
		return 0;

	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}

static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
			      gic_acpi_parse_madt_its, 0);
}
#else
static void __init its_acpi_probe(void) { }
#endif

int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *of_node;

	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	gic_rdists = rdists;
	return its_alloc_lpi_tables();
}