1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
7 #include <linux/acpi.h>
8 #include <linux/acpi_iort.h>
9 #include <linux/bitfield.h>
10 #include <linux/bitmap.h>
11 #include <linux/cpu.h>
12 #include <linux/crash_dump.h>
13 #include <linux/delay.h>
14 #include <linux/efi.h>
15 #include <linux/interrupt.h>
16 #include <linux/iommu.h>
17 #include <linux/iopoll.h>
18 #include <linux/irqdomain.h>
19 #include <linux/list.h>
20 #include <linux/log2.h>
21 #include <linux/memblock.h>
23 #include <linux/msi.h>
25 #include <linux/of_address.h>
26 #include <linux/of_irq.h>
27 #include <linux/of_pci.h>
28 #include <linux/of_platform.h>
29 #include <linux/percpu.h>
30 #include <linux/slab.h>
31 #include <linux/syscore_ops.h>
33 #include <linux/irqchip.h>
34 #include <linux/irqchip/arm-gic-v3.h>
35 #include <linux/irqchip/arm-gic-v4.h>
37 #include <asm/cputype.h>
38 #include <asm/exception.h>
40 #include "irq-gic-common.h"
42 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
43 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
44 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
45 #define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3)
47 #define RD_LOCAL_LPI_ENABLED BIT(0)
48 #define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1)
49 #define RD_LOCAL_MEMRESERVE_DONE BIT(2)
51 static u32 lpi_id_bits;
54 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
55 * deal with (one configuration byte per interrupt). PENDBASE has to
56 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
58 #define LPI_NRBITS lpi_id_bits
59 #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
60 #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
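/*
 * Worked example: with the maximum of 16 ID bits (ITS_MAX_LPI_NRBITS),
 * LPI_PROPBASE_SZ is 64K (one property byte per LPI), and the pending
 * table payload is 64K / 8 = 8K, which ALIGN() rounds up to a full 64K
 * LPI_PENDBASE_SZ.
 */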
62 #define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
65 * Collection structure - just an ID, and a redistributor address to
66 * ping. We use one per CPU as a bag of interrupts assigned to this CPU.
69 struct its_collection {
75 * The ITS_BASER structure - contains memory information, cached
76 * value of BASER register configuration and ITS page size.
88 * The ITS structure - contains most of the infrastructure, with the
89 * top-level MSI domain, the command queue, the collections, and the
90 * list of devices writing to it.
92 * dev_alloc_lock has to be taken for device allocations, while the
93 * spinlock must be taken to parse data structures such as the device list.
98 struct mutex dev_alloc_lock;
99 struct list_head entry;
101 void __iomem *sgir_base;
102 phys_addr_t phys_base;
103 struct its_cmd_block *cmd_base;
104 struct its_cmd_block *cmd_write;
105 struct its_baser tables[GITS_BASER_NR_REGS];
106 struct its_collection *collections;
107 struct fwnode_handle *fwnode_handle;
108 u64 (*get_msi_base)(struct its_device *its_dev);
113 struct list_head its_device_list;
115 unsigned long list_nr;
117 unsigned int msi_domain_flags;
118 u32 pre_its_base; /* for Socionext Synquacer */
119 int vlpi_redist_offset;
122 #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
123 #define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
124 #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
126 #define ITS_ITT_ALIGN SZ_256
128 /* The maximum number of VPEID bits supported by VLPI commands */
129 #define ITS_MAX_VPEID_BITS \
132 if (gic_rdists->has_rvpeid && \
133 gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \
134 nvpeid = 1 + (gic_rdists->gicd_typer2 & \
139 #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
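/*
 * ITS_MAX_VPEID_BITS defaults to 16 (the GICv4 VPEID width); with
 * RVPEID and GICD_TYPER2.VIL set (GICv4.1), it is GICD_TYPER2.VID + 1
 * instead, as encoded in the macro above.
 */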
141 /* Convert page order to size in bytes */
142 #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
144 struct event_lpi_map {
145 unsigned long *lpi_map;
147 irq_hw_number_t lpi_base;
149 raw_spinlock_t vlpi_lock;
151 struct its_vlpi_map *vlpi_maps;
156 * The ITS view of a device - belongs to an ITS, owns an interrupt
157 * translation table, and a list of interrupts. If some of its
158 * LPIs are injected into a guest (GICv4), the event_map.vm field
159 * indicates which one.
162 struct list_head entry;
163 struct its_node *its;
164 struct event_lpi_map event_map;
173 struct its_device *dev;
174 struct its_vpe **vpes;
178 struct cpu_lpi_count {
183 static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
185 static LIST_HEAD(its_nodes);
186 static DEFINE_RAW_SPINLOCK(its_lock);
187 static struct rdists *gic_rdists;
188 static struct irq_domain *its_parent;
190 static unsigned long its_list_map;
191 static u16 vmovp_seq_num;
192 static DEFINE_RAW_SPINLOCK(vmovp_lock);
194 static DEFINE_IDA(its_vpeid_ida);
196 #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
197 #define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
198 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
199 #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
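/*
 * Per the GICv3/v4 architecture, each redistributor exposes two 64K
 * register frames (RD_base and SGI_base); the GICv4 VLPI frame follows
 * them, hence the SZ_128K offset from rd_base above.
 */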
202 * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
203 * always have vSGIs mapped.
205 static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
207 return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
210 static bool rdists_support_shareable(void)
212 return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE);
215 static u16 get_its_list(struct its_vm *vm)
217 struct its_node *its;
218 unsigned long its_list = 0;
220 list_for_each_entry(its, &its_nodes, entry) {
224 if (require_its_list_vmovp(vm, its))
225 __set_bit(its->list_nr, &its_list);
228 return (u16)its_list;
231 static inline u32 its_get_event_id(struct irq_data *d)
233 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
234 return d->hwirq - its_dev->event_map.lpi_base;
237 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
240 struct its_node *its = its_dev->its;
242 return its->collections + its_dev->event_map.col_map[event];
245 static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
248 if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
251 return &its_dev->event_map.vlpi_maps[event];
254 static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
256 if (irqd_is_forwarded_to_vcpu(d)) {
257 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
258 u32 event = its_get_event_id(d);
260 return dev_event_to_vlpi_map(its_dev, event);
266 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
268 raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
272 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
274 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
277 static struct irq_chip its_vpe_irq_chip;
279 static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
281 struct its_vpe *vpe = NULL;
284 if (d->chip == &its_vpe_irq_chip) {
285 vpe = irq_data_get_irq_chip_data(d);
287 struct its_vlpi_map *map = get_vlpi_map(d);
293 cpu = vpe_to_cpuid_lock(vpe, flags);
295 /* Physical LPIs are already locked via the irq_desc lock */
296 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
297 cpu = its_dev->event_map.col_map[its_get_event_id(d)];
298 /* Keep GCC quiet... */
305 static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
307 struct its_vpe *vpe = NULL;
309 if (d->chip == &its_vpe_irq_chip) {
310 vpe = irq_data_get_irq_chip_data(d);
312 struct its_vlpi_map *map = get_vlpi_map(d);
318 vpe_to_cpuid_unlock(vpe, flags);
321 static struct its_collection *valid_col(struct its_collection *col)
323 if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
329 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
331 if (valid_col(its->collections + vpe->col_idx))
338 * ITS command descriptors - parameters to be encoded in a command block.
341 struct its_cmd_desc {
344 struct its_device *dev;
349 struct its_device *dev;
354 struct its_device *dev;
359 struct its_device *dev;
364 struct its_collection *col;
369 struct its_device *dev;
375 struct its_device *dev;
376 struct its_collection *col;
381 struct its_device *dev;
386 struct its_collection *col;
395 struct its_collection *col;
401 struct its_device *dev;
409 struct its_device *dev;
416 struct its_collection *col;
437 * The ITS command block, which is what the ITS actually parses.
439 struct its_cmd_block {
442 __le64 raw_cmd_le[4];
446 #define ITS_CMD_QUEUE_SZ SZ_64K
447 #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
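/*
 * Each command is four 64-bit words (32 bytes), so the 64K command
 * queue holds 2048 entries.
 */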
449 typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
450 struct its_cmd_block *,
451 struct its_cmd_desc *);
453 typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
454 struct its_cmd_block *,
455 struct its_cmd_desc *);
457 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
459 u64 mask = GENMASK_ULL(h, l);
461 *raw_cmd |= (val << l) & mask;
464 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
466 its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
469 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
471 its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
474 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
476 its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
479 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
481 its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
484 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
486 its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
489 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
491 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
494 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
496 its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
499 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
501 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
504 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
506 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
509 static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
511 its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
514 static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
516 its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
519 static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
521 its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
524 static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
526 its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
529 static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
531 its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
534 static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
536 its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
539 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
541 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
544 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
546 its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
549 static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
551 its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
554 static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
556 its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
559 static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
561 its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
564 static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
567 its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
570 static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
573 its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
576 static void its_encode_db(struct its_cmd_block *cmd, bool db)
578 its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
581 static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
583 its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
586 static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
588 its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
591 static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
593 its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
596 static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
598 its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
601 static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
603 its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
606 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
608 /* Let's fixup BE commands */
609 cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
610 cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
611 cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
612 cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
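/*
 * The ITS consumes commands as little-endian data: on a big-endian
 * kernel the cpu_to_le64() calls above byte-swap each word, while on a
 * little-endian build they compile away.
 */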
615 static struct its_collection *its_build_mapd_cmd(struct its_node *its,
616 struct its_cmd_block *cmd,
617 struct its_cmd_desc *desc)
619 unsigned long itt_addr;
620 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
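/*
 * The MAPD Size field encodes the number of EventID bits minus one, so
 * 'size - 1' is what gets written into the command below.
 */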
622 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
623 itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
625 its_encode_cmd(cmd, GITS_CMD_MAPD);
626 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
627 its_encode_size(cmd, size - 1);
628 its_encode_itt(cmd, itt_addr);
629 its_encode_valid(cmd, desc->its_mapd_cmd.valid);
636 static struct its_collection *its_build_mapc_cmd(struct its_node *its,
637 struct its_cmd_block *cmd,
638 struct its_cmd_desc *desc)
640 its_encode_cmd(cmd, GITS_CMD_MAPC);
641 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
642 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
643 its_encode_valid(cmd, desc->its_mapc_cmd.valid);
647 return desc->its_mapc_cmd.col;
650 static struct its_collection *its_build_mapti_cmd(struct its_node *its,
651 struct its_cmd_block *cmd,
652 struct its_cmd_desc *desc)
654 struct its_collection *col;
656 col = dev_event_to_col(desc->its_mapti_cmd.dev,
657 desc->its_mapti_cmd.event_id);
659 its_encode_cmd(cmd, GITS_CMD_MAPTI);
660 its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
661 its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
662 its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
663 its_encode_collection(cmd, col->col_id);
667 return valid_col(col);
670 static struct its_collection *its_build_movi_cmd(struct its_node *its,
671 struct its_cmd_block *cmd,
672 struct its_cmd_desc *desc)
674 struct its_collection *col;
676 col = dev_event_to_col(desc->its_movi_cmd.dev,
677 desc->its_movi_cmd.event_id);
679 its_encode_cmd(cmd, GITS_CMD_MOVI);
680 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
681 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
682 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
686 return valid_col(col);
689 static struct its_collection *its_build_discard_cmd(struct its_node *its,
690 struct its_cmd_block *cmd,
691 struct its_cmd_desc *desc)
693 struct its_collection *col;
695 col = dev_event_to_col(desc->its_discard_cmd.dev,
696 desc->its_discard_cmd.event_id);
698 its_encode_cmd(cmd, GITS_CMD_DISCARD);
699 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
700 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
704 return valid_col(col);
707 static struct its_collection *its_build_inv_cmd(struct its_node *its,
708 struct its_cmd_block *cmd,
709 struct its_cmd_desc *desc)
711 struct its_collection *col;
713 col = dev_event_to_col(desc->its_inv_cmd.dev,
714 desc->its_inv_cmd.event_id);
716 its_encode_cmd(cmd, GITS_CMD_INV);
717 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
718 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
722 return valid_col(col);
725 static struct its_collection *its_build_int_cmd(struct its_node *its,
726 struct its_cmd_block *cmd,
727 struct its_cmd_desc *desc)
729 struct its_collection *col;
731 col = dev_event_to_col(desc->its_int_cmd.dev,
732 desc->its_int_cmd.event_id);
734 its_encode_cmd(cmd, GITS_CMD_INT);
735 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
736 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
740 return valid_col(col);
743 static struct its_collection *its_build_clear_cmd(struct its_node *its,
744 struct its_cmd_block *cmd,
745 struct its_cmd_desc *desc)
747 struct its_collection *col;
749 col = dev_event_to_col(desc->its_clear_cmd.dev,
750 desc->its_clear_cmd.event_id);
752 its_encode_cmd(cmd, GITS_CMD_CLEAR);
753 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
754 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
758 return valid_col(col);
761 static struct its_collection *its_build_invall_cmd(struct its_node *its,
762 struct its_cmd_block *cmd,
763 struct its_cmd_desc *desc)
765 its_encode_cmd(cmd, GITS_CMD_INVALL);
766 its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
770 return desc->its_invall_cmd.col;
773 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
774 struct its_cmd_block *cmd,
775 struct its_cmd_desc *desc)
777 its_encode_cmd(cmd, GITS_CMD_VINVALL);
778 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
782 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
785 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
786 struct its_cmd_block *cmd,
787 struct its_cmd_desc *desc)
789 struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
790 unsigned long vpt_addr, vconf_addr;
794 its_encode_cmd(cmd, GITS_CMD_VMAPP);
795 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
796 its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
798 if (!desc->its_vmapp_cmd.valid) {
800 alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
801 its_encode_alloc(cmd, alloc);
803 * Unmapping a VPE is self-synchronizing on GICv4.1,
804 * no need to issue a VSYNC.
812 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
813 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
815 its_encode_target(cmd, target);
816 its_encode_vpt_addr(cmd, vpt_addr);
817 its_encode_vpt_size(cmd, LPI_NRBITS - 1);
822 vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
824 alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
826 its_encode_alloc(cmd, alloc);
829 * GICv4.1 provides a way to get the VLPI state, which needs the vPE
830 * to be unmapped first, and in this case, we may remap the vPE
831 * back while the VPT is not empty. So we can't assume that the
832 * VPT is empty on map. This is why we never advertise PTZ.
834 its_encode_ptz(cmd, false);
835 its_encode_vconf_addr(cmd, vconf_addr);
836 its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
844 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
845 struct its_cmd_block *cmd,
846 struct its_cmd_desc *desc)
850 if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
851 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
855 its_encode_cmd(cmd, GITS_CMD_VMAPTI);
856 its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
857 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
858 its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
859 its_encode_db_phys_id(cmd, db);
860 its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
864 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
867 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
868 struct its_cmd_block *cmd,
869 struct its_cmd_desc *desc)
873 if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
874 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
878 its_encode_cmd(cmd, GITS_CMD_VMOVI);
879 its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
880 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
881 its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
882 its_encode_db_phys_id(cmd, db);
883 its_encode_db_valid(cmd, true);
887 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
890 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
891 struct its_cmd_block *cmd,
892 struct its_cmd_desc *desc)
896 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
897 its_encode_cmd(cmd, GITS_CMD_VMOVP);
898 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
899 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
900 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
901 its_encode_target(cmd, target);
904 its_encode_db(cmd, true);
905 its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
910 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
913 static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
914 struct its_cmd_block *cmd,
915 struct its_cmd_desc *desc)
917 struct its_vlpi_map *map;
919 map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
920 desc->its_inv_cmd.event_id);
922 its_encode_cmd(cmd, GITS_CMD_INV);
923 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
924 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
928 return valid_vpe(its, map->vpe);
931 static struct its_vpe *its_build_vint_cmd(struct its_node *its,
932 struct its_cmd_block *cmd,
933 struct its_cmd_desc *desc)
935 struct its_vlpi_map *map;
937 map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
938 desc->its_int_cmd.event_id);
940 its_encode_cmd(cmd, GITS_CMD_INT);
941 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
942 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
946 return valid_vpe(its, map->vpe);
949 static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
950 struct its_cmd_block *cmd,
951 struct its_cmd_desc *desc)
953 struct its_vlpi_map *map;
955 map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
956 desc->its_clear_cmd.event_id);
958 its_encode_cmd(cmd, GITS_CMD_CLEAR);
959 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
960 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
964 return valid_vpe(its, map->vpe);
967 static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
968 struct its_cmd_block *cmd,
969 struct its_cmd_desc *desc)
971 if (WARN_ON(!is_v4_1(its)))
974 its_encode_cmd(cmd, GITS_CMD_INVDB);
975 its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
979 return valid_vpe(its, desc->its_invdb_cmd.vpe);
982 static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
983 struct its_cmd_block *cmd,
984 struct its_cmd_desc *desc)
986 if (WARN_ON(!is_v4_1(its)))
989 its_encode_cmd(cmd, GITS_CMD_VSGI);
990 its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
991 its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
992 its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
993 its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
994 its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
995 its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
999 return valid_vpe(its, desc->its_vsgi_cmd.vpe);
1002 static u64 its_cmd_ptr_to_offset(struct its_node *its,
1003 struct its_cmd_block *ptr)
1005 return (ptr - its->cmd_base) * sizeof(*ptr);
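/*
 * Commands are addressed by their byte offset into the queue, which is
 * the unit used by the GITS_CREADR and GITS_CWRITER registers.
 */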
1008 static int its_queue_full(struct its_node *its)
1013 widx = its->cmd_write - its->cmd_base;
1014 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
1016 /* This is incredibly unlikely to happen, unless the ITS locks up. */
1017 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
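/*
 * The queue is reported full when the write pointer is one slot behind
 * the read pointer, keeping one entry unused so that a full queue can
 * be distinguished from an empty one.
 */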
1023 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
1025 struct its_cmd_block *cmd;
1026 u32 count = 1000000; /* 1s! */
1028 while (its_queue_full(its)) {
1031 pr_err_ratelimited("ITS queue not draining\n");
1038 cmd = its->cmd_write++;
1040 /* Handle queue wrapping */
1041 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
1042 its->cmd_write = its->cmd_base;
1045 cmd->raw_cmd[0] = 0;
1046 cmd->raw_cmd[1] = 0;
1047 cmd->raw_cmd[2] = 0;
1048 cmd->raw_cmd[3] = 0;
1053 static struct its_cmd_block *its_post_commands(struct its_node *its)
1055 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
1057 writel_relaxed(wr, its->base + GITS_CWRITER);
1059 return its->cmd_write;
1062 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
1065 * Make sure the commands written to memory are observable by the ITS.
1068 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
1069 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
1074 static int its_wait_for_range_completion(struct its_node *its,
1076 struct its_cmd_block *to)
1078 u64 rd_idx, to_idx, linear_idx;
1079 u32 count = 1000000; /* 1s! */
1081 /* Linearize to_idx if the command set has wrapped around */
1082 to_idx = its_cmd_ptr_to_offset(its, to);
1083 if (to_idx < prev_idx)
1084 to_idx += ITS_CMD_QUEUE_SZ;
1086 linear_idx = prev_idx;
1091 rd_idx = readl_relaxed(its->base + GITS_CREADR);
1094 * Compute the read pointer progress, taking the
1095 * potential wrap-around into account.
1097 delta = rd_idx - prev_idx;
1098 if (rd_idx < prev_idx)
1099 delta += ITS_CMD_QUEUE_SZ;
1101 linear_idx += delta;
1102 if (linear_idx >= to_idx)
1107 pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
1108 to_idx, linear_idx);
1119 /* Warning, macro hell follows */
1120 #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
1121 void name(struct its_node *its, \
1122 buildtype builder, \
1123 struct its_cmd_desc *desc) \
1125 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
1126 synctype *sync_obj; \
1127 unsigned long flags; \
1130 raw_spin_lock_irqsave(&its->lock, flags); \
1132 cmd = its_allocate_entry(its); \
1133 if (!cmd) { /* We're soooooo screwed... */ \
1134 raw_spin_unlock_irqrestore(&its->lock, flags); \
1137 sync_obj = builder(its, cmd, desc); \
1138 its_flush_cmd(its, cmd); \
1141 sync_cmd = its_allocate_entry(its); \
1145 buildfn(its, sync_cmd, sync_obj); \
1146 its_flush_cmd(its, sync_cmd); \
1150 rd_idx = readl_relaxed(its->base + GITS_CREADR); \
1151 next_cmd = its_post_commands(its); \
1152 raw_spin_unlock_irqrestore(&its->lock, flags); \
1154 if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
1155 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
1158 static void its_build_sync_cmd(struct its_node *its,
1159 struct its_cmd_block *sync_cmd,
1160 struct its_collection *sync_col)
1162 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
1163 its_encode_target(sync_cmd, sync_col->target_address);
1165 its_fixup_cmd(sync_cmd);
1168 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
1169 struct its_collection, its_build_sync_cmd)
1171 static void its_build_vsync_cmd(struct its_node *its,
1172 struct its_cmd_block *sync_cmd,
1173 struct its_vpe *sync_vpe)
1175 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
1176 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
1178 its_fixup_cmd(sync_cmd);
1181 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
1182 struct its_vpe, its_build_vsync_cmd)
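/*
 * BUILD_SINGLE_CMD_FUNC() expands to the two command senders defined
 * above: its_send_single_command() follows a physical command with a
 * SYNC, while its_send_single_vcommand() follows a virtual command
 * with a VSYNC targeting the relevant vPE.
 */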
1184 static void its_send_int(struct its_device *dev, u32 event_id)
1186 struct its_cmd_desc desc;
1188 desc.its_int_cmd.dev = dev;
1189 desc.its_int_cmd.event_id = event_id;
1191 its_send_single_command(dev->its, its_build_int_cmd, &desc);
1194 static void its_send_clear(struct its_device *dev, u32 event_id)
1196 struct its_cmd_desc desc;
1198 desc.its_clear_cmd.dev = dev;
1199 desc.its_clear_cmd.event_id = event_id;
1201 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
1204 static void its_send_inv(struct its_device *dev, u32 event_id)
1206 struct its_cmd_desc desc;
1208 desc.its_inv_cmd.dev = dev;
1209 desc.its_inv_cmd.event_id = event_id;
1211 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
1214 static void its_send_mapd(struct its_device *dev, int valid)
1216 struct its_cmd_desc desc;
1218 desc.its_mapd_cmd.dev = dev;
1219 desc.its_mapd_cmd.valid = !!valid;
1221 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
1224 static void its_send_mapc(struct its_node *its, struct its_collection *col,
1227 struct its_cmd_desc desc;
1229 desc.its_mapc_cmd.col = col;
1230 desc.its_mapc_cmd.valid = !!valid;
1232 its_send_single_command(its, its_build_mapc_cmd, &desc);
1235 static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
1237 struct its_cmd_desc desc;
1239 desc.its_mapti_cmd.dev = dev;
1240 desc.its_mapti_cmd.phys_id = irq_id;
1241 desc.its_mapti_cmd.event_id = id;
1243 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1246 static void its_send_movi(struct its_device *dev,
1247 struct its_collection *col, u32 id)
1249 struct its_cmd_desc desc;
1251 desc.its_movi_cmd.dev = dev;
1252 desc.its_movi_cmd.col = col;
1253 desc.its_movi_cmd.event_id = id;
1255 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1258 static void its_send_discard(struct its_device *dev, u32 id)
1260 struct its_cmd_desc desc;
1262 desc.its_discard_cmd.dev = dev;
1263 desc.its_discard_cmd.event_id = id;
1265 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1268 static void its_send_invall(struct its_node *its, struct its_collection *col)
1270 struct its_cmd_desc desc;
1272 desc.its_invall_cmd.col = col;
1274 its_send_single_command(its, its_build_invall_cmd, &desc);
1277 static void its_send_vmapti(struct its_device *dev, u32 id)
1279 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1280 struct its_cmd_desc desc;
1282 desc.its_vmapti_cmd.vpe = map->vpe;
1283 desc.its_vmapti_cmd.dev = dev;
1284 desc.its_vmapti_cmd.virt_id = map->vintid;
1285 desc.its_vmapti_cmd.event_id = id;
1286 desc.its_vmapti_cmd.db_enabled = map->db_enabled;
1288 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1291 static void its_send_vmovi(struct its_device *dev, u32 id)
1293 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1294 struct its_cmd_desc desc;
1296 desc.its_vmovi_cmd.vpe = map->vpe;
1297 desc.its_vmovi_cmd.dev = dev;
1298 desc.its_vmovi_cmd.event_id = id;
1299 desc.its_vmovi_cmd.db_enabled = map->db_enabled;
1301 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1304 static void its_send_vmapp(struct its_node *its,
1305 struct its_vpe *vpe, bool valid)
1307 struct its_cmd_desc desc;
1309 desc.its_vmapp_cmd.vpe = vpe;
1310 desc.its_vmapp_cmd.valid = valid;
1311 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1313 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1316 static void its_send_vmovp(struct its_vpe *vpe)
1318 struct its_cmd_desc desc = {};
1319 struct its_node *its;
1320 unsigned long flags;
1321 int col_id = vpe->col_idx;
1323 desc.its_vmovp_cmd.vpe = vpe;
1325 if (!its_list_map) {
1326 its = list_first_entry(&its_nodes, struct its_node, entry);
1327 desc.its_vmovp_cmd.col = &its->collections[col_id];
1328 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1333 * Yet another marvel of the architecture. If using the
1334 * its_list "feature", we need to make sure that all ITSs
1335 * receive all VMOVP commands in the same order. The only way
1336 * to guarantee this is to make vmovp a serialization point.
1340 raw_spin_lock_irqsave(&vmovp_lock, flags);
1342 desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
1343 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1346 list_for_each_entry(its, &its_nodes, entry) {
1350 if (!require_its_list_vmovp(vpe->its_vm, its))
1353 desc.its_vmovp_cmd.col = &its->collections[col_id];
1354 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1357 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1360 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1362 struct its_cmd_desc desc;
1364 desc.its_vinvall_cmd.vpe = vpe;
1365 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1368 static void its_send_vinv(struct its_device *dev, u32 event_id)
1370 struct its_cmd_desc desc;
1373 * There is no real VINV command. This is just a normal INV,
1374 * with a VSYNC instead of a SYNC.
1376 desc.its_inv_cmd.dev = dev;
1377 desc.its_inv_cmd.event_id = event_id;
1379 its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1382 static void its_send_vint(struct its_device *dev, u32 event_id)
1384 struct its_cmd_desc desc;
1387 * There is no real VINT command. This is just a normal INT,
1388 * with a VSYNC instead of a SYNC.
1390 desc.its_int_cmd.dev = dev;
1391 desc.its_int_cmd.event_id = event_id;
1393 its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1396 static void its_send_vclear(struct its_device *dev, u32 event_id)
1398 struct its_cmd_desc desc;
1401 * There is no real VCLEAR command. This is just a normal CLEAR,
1402 * with a VSYNC instead of a SYNC.
1404 desc.its_clear_cmd.dev = dev;
1405 desc.its_clear_cmd.event_id = event_id;
1407 its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1410 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1412 struct its_cmd_desc desc;
1414 desc.its_invdb_cmd.vpe = vpe;
1415 its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
1419 * irqchip functions - assumes MSI, mostly.
1421 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1423 struct its_vlpi_map *map = get_vlpi_map(d);
1424 irq_hw_number_t hwirq;
1429 va = page_address(map->vm->vprop_page);
1430 hwirq = map->vintid;
1432 /* Remember the updated property */
1433 map->properties &= ~clr;
1434 map->properties |= set | LPI_PROP_GROUP1;
1436 va = gic_rdists->prop_table_va;
1440 cfg = va + hwirq - 8192;
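/* LPI INTIDs start at 8192, so the property table is indexed by (INTID - 8192). */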
1442 *cfg |= set | LPI_PROP_GROUP1;
1445 * Make the above write visible to the redistributors.
1446 * And yes, we're flushing exactly: One. Single. Byte.
1449 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1450 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1455 static void wait_for_syncr(void __iomem *rdbase)
1457 while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
1461 static void __direct_lpi_inv(struct irq_data *d, u64 val)
1463 void __iomem *rdbase;
1464 unsigned long flags;
1467 /* Target the redistributor this LPI is currently routed to */
1468 cpu = irq_to_cpuid_lock(d, &flags);
1469 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
1471 rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
1472 gic_write_lpir(val, rdbase + GICR_INVLPIR);
1473 wait_for_syncr(rdbase);
1475 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
1476 irq_to_cpuid_unlock(d, flags);
1479 static void direct_lpi_inv(struct irq_data *d)
1481 struct its_vlpi_map *map = get_vlpi_map(d);
1485 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1487 WARN_ON(!is_v4_1(its_dev->its));
1489 val = GICR_INVLPIR_V;
1490 val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
1491 val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
1496 __direct_lpi_inv(d, val);
1499 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1501 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1503 lpi_write_config(d, clr, set);
1504 if (gic_rdists->has_direct_lpi &&
1505 (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
1507 else if (!irqd_is_forwarded_to_vcpu(d))
1508 its_send_inv(its_dev, its_get_event_id(d));
1510 its_send_vinv(its_dev, its_get_event_id(d));
1513 static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1515 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1516 u32 event = its_get_event_id(d);
1517 struct its_vlpi_map *map;
1520 * GICv4.1 does away with the per-LPI nonsense, nothing to do here.
1523 if (is_v4_1(its_dev->its))
1526 map = dev_event_to_vlpi_map(its_dev, event);
1528 if (map->db_enabled == enable)
1531 map->db_enabled = enable;
1534 * More fun with the architecture:
1536 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1537 * value or to 1023, depending on the enable bit. But that
1538 * would be issuing a mapping for an /existing/ DevID+EventID
1539 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1540 * to the /same/ vPE, using this opportunity to adjust the
1541 * doorbell. Mouahahahaha. We loves it, Precious.
1543 its_send_vmovi(its_dev, event);
1546 static void its_mask_irq(struct irq_data *d)
1548 if (irqd_is_forwarded_to_vcpu(d))
1549 its_vlpi_set_doorbell(d, false);
1551 lpi_update_config(d, LPI_PROP_ENABLED, 0);
1554 static void its_unmask_irq(struct irq_data *d)
1556 if (irqd_is_forwarded_to_vcpu(d))
1557 its_vlpi_set_doorbell(d, true);
1559 lpi_update_config(d, 0, LPI_PROP_ENABLED);
1562 static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
1564 if (irqd_affinity_is_managed(d))
1565 return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1567 return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
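/*
 * The per-CPU managed/unmanaged counters track how many LPIs target
 * each CPU; cpumask_pick_least_loaded() below uses them to spread
 * interrupts across the candidate CPUs.
 */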
1570 static void its_inc_lpi_count(struct irq_data *d, int cpu)
1572 if (irqd_affinity_is_managed(d))
1573 atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1575 atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1578 static void its_dec_lpi_count(struct irq_data *d, int cpu)
1580 if (irqd_affinity_is_managed(d))
1581 atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1583 atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1586 static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
1587 const struct cpumask *cpu_mask)
1589 unsigned int cpu = nr_cpu_ids, tmp;
1590 int count = S32_MAX;
1592 for_each_cpu(tmp, cpu_mask) {
1593 int this_count = its_read_lpi_count(d, tmp);
1594 if (this_count < count) {
1604 * As suggested by Thomas Gleixner in:
1605 * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
1607 static int its_select_cpu(struct irq_data *d,
1608 const struct cpumask *aff_mask)
1610 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1611 static DEFINE_RAW_SPINLOCK(tmpmask_lock);
1612 static struct cpumask __tmpmask;
1613 struct cpumask *tmpmask;
1614 unsigned long flags;
1616 node = its_dev->its->numa_node;
1617 tmpmask = &__tmpmask;
1619 raw_spin_lock_irqsave(&tmpmask_lock, flags);
1621 if (!irqd_affinity_is_managed(d)) {
1622 /* First try the NUMA node */
1623 if (node != NUMA_NO_NODE) {
1625 * Try the intersection of the affinity mask and the
1626 * node mask (and the online mask, just to be safe).
1628 cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
1629 cpumask_and(tmpmask, tmpmask, cpu_online_mask);
1632 * Ideally, we would check if the mask is empty, and
1633 * try again on the full node here.
1635 * But it turns out that the way ACPI describes the
1636 * affinity for ITSs only deals with memory, and
1637 * not target CPUs, so it cannot describe a single
1638 * ITS placed next to two NUMA nodes.
1640 * Instead, just fall back on the online mask. This
1641 * diverges from Thomas' suggestion above.
1643 cpu = cpumask_pick_least_loaded(d, tmpmask);
1644 if (cpu < nr_cpu_ids)
1647 /* If we can't cross sockets, give up */
1648 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
1651 /* If the above failed, expand the search */
1654 /* Try the intersection of the affinity and online masks */
1655 cpumask_and(tmpmask, aff_mask, cpu_online_mask);
1657 /* If that doesn't fly, the online mask is the last resort */
1658 if (cpumask_empty(tmpmask))
1659 cpumask_copy(tmpmask, cpu_online_mask);
1661 cpu = cpumask_pick_least_loaded(d, tmpmask);
1663 cpumask_copy(tmpmask, aff_mask);
1665 /* If we cannot cross sockets, limit the search to that node */
1666 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
1667 node != NUMA_NO_NODE)
1668 cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
1670 cpu = cpumask_pick_least_loaded(d, tmpmask);
1673 raw_spin_unlock_irqrestore(&tmpmask_lock, flags);
1675 pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
1679 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1682 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1683 struct its_collection *target_col;
1684 u32 id = its_get_event_id(d);
1687 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1688 if (irqd_is_forwarded_to_vcpu(d))
1691 prev_cpu = its_dev->event_map.col_map[id];
1692 its_dec_lpi_count(d, prev_cpu);
1695 cpu = its_select_cpu(d, mask_val);
1697 cpu = cpumask_pick_least_loaded(d, mask_val);
1699 if (cpu < 0 || cpu >= nr_cpu_ids)
1702 /* don't set the affinity when the target cpu is the same as the current one */
1703 if (cpu != prev_cpu) {
1704 target_col = &its_dev->its->collections[cpu];
1705 its_send_movi(its_dev, target_col, id);
1706 its_dev->event_map.col_map[id] = cpu;
1707 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1710 its_inc_lpi_count(d, cpu);
1712 return IRQ_SET_MASK_OK_DONE;
1715 its_inc_lpi_count(d, prev_cpu);
1719 static u64 its_irq_get_msi_base(struct its_device *its_dev)
1721 struct its_node *its = its_dev->its;
1723 return its->phys_base + GITS_TRANSLATER;
1726 static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1728 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1729 struct its_node *its;
1733 addr = its->get_msi_base(its_dev);
1735 msg->address_lo = lower_32_bits(addr);
1736 msg->address_hi = upper_32_bits(addr);
1737 msg->data = its_get_event_id(d);
1739 iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
1742 static int its_irq_set_irqchip_state(struct irq_data *d,
1743 enum irqchip_irq_state which,
1746 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1747 u32 event = its_get_event_id(d);
1749 if (which != IRQCHIP_STATE_PENDING)
1752 if (irqd_is_forwarded_to_vcpu(d)) {
1754 its_send_vint(its_dev, event);
1756 its_send_vclear(its_dev, event);
1759 its_send_int(its_dev, event);
1761 its_send_clear(its_dev, event);
1767 static int its_irq_retrigger(struct irq_data *d)
1769 return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
1773 * Two favourable cases:
1775 * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1778 * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
1779 * and we're better off mapping all VPEs always
1781 * If neither (a) nor (b) is true, then we map vPEs on demand.
1784 static bool gic_requires_eager_mapping(void)
1786 if (!its_list_map || gic_rdists->has_rvpeid)
1792 static void its_map_vm(struct its_node *its, struct its_vm *vm)
1794 unsigned long flags;
1796 if (gic_requires_eager_mapping())
1799 raw_spin_lock_irqsave(&vmovp_lock, flags);
1802 * If the VM wasn't mapped yet, iterate over the vpes and get them mapped now.
1805 vm->vlpi_count[its->list_nr]++;
1807 if (vm->vlpi_count[its->list_nr] == 1) {
1810 for (i = 0; i < vm->nr_vpes; i++) {
1811 struct its_vpe *vpe = vm->vpes[i];
1812 struct irq_data *d = irq_get_irq_data(vpe->irq);
1814 /* Map the VPE to the first possible CPU */
1815 vpe->col_idx = cpumask_first(cpu_online_mask);
1816 its_send_vmapp(its, vpe, true);
1817 its_send_vinvall(its, vpe);
1818 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
1822 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1825 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1827 unsigned long flags;
1829 /* Not using the ITS list? Everything is always mapped. */
1830 if (gic_requires_eager_mapping())
1833 raw_spin_lock_irqsave(&vmovp_lock, flags);
1835 if (!--vm->vlpi_count[its->list_nr]) {
1838 for (i = 0; i < vm->nr_vpes; i++)
1839 its_send_vmapp(its, vm->vpes[i], false);
1842 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1845 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1847 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1848 u32 event = its_get_event_id(d);
1854 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1856 if (!its_dev->event_map.vm) {
1857 struct its_vlpi_map *maps;
1859 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1866 its_dev->event_map.vm = info->map->vm;
1867 its_dev->event_map.vlpi_maps = maps;
1868 } else if (its_dev->event_map.vm != info->map->vm) {
1873 /* Get our private copy of the mapping information */
1874 its_dev->event_map.vlpi_maps[event] = *info->map;
1876 if (irqd_is_forwarded_to_vcpu(d)) {
1877 /* Already mapped, move it around */
1878 its_send_vmovi(its_dev, event);
1880 /* Ensure all the VPEs are mapped on this ITS */
1881 its_map_vm(its_dev->its, info->map->vm);
1884 * Flag the interrupt as forwarded so that we can
1885 * start poking the virtual property table.
1887 irqd_set_forwarded_to_vcpu(d);
1889 /* Write out the property to the prop table */
1890 lpi_write_config(d, 0xff, info->map->properties);
1892 /* Drop the physical mapping */
1893 its_send_discard(its_dev, event);
1895 /* and install the virtual one */
1896 its_send_vmapti(its_dev, event);
1898 /* Increment the number of VLPIs */
1899 its_dev->event_map.nr_vlpis++;
1903 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1907 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1909 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1910 struct its_vlpi_map *map;
1913 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1915 map = get_vlpi_map(d);
1917 if (!its_dev->event_map.vm || !map) {
1922 /* Copy our mapping information to the incoming request */
1926 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1930 static int its_vlpi_unmap(struct irq_data *d)
1932 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1933 u32 event = its_get_event_id(d);
1936 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1938 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1943 /* Drop the virtual mapping */
1944 its_send_discard(its_dev, event);
1946 /* and restore the physical one */
1947 irqd_clr_forwarded_to_vcpu(d);
1948 its_send_mapti(its_dev, d->hwirq, event);
1949 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1953 /* Potentially unmap the VM from this ITS */
1954 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1957 * Drop the refcount and make the device available again if
1958 * this was the last VLPI.
1960 if (!--its_dev->event_map.nr_vlpis) {
1961 its_dev->event_map.vm = NULL;
1962 kfree(its_dev->event_map.vlpi_maps);
1966 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1970 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1972 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1974 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1977 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1978 lpi_update_config(d, 0xff, info->config);
1980 lpi_write_config(d, 0xff, info->config);
1981 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1986 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1988 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1989 struct its_cmd_info *info = vcpu_info;
1992 if (!is_v4(its_dev->its))
1995 /* Unmap request? */
1997 return its_vlpi_unmap(d);
1999 switch (info->cmd_type) {
2001 return its_vlpi_map(d, info);
2004 return its_vlpi_get(d, info);
2006 case PROP_UPDATE_VLPI:
2007 case PROP_UPDATE_AND_INV_VLPI:
2008 return its_vlpi_prop_update(d, info);
2015 static struct irq_chip its_irq_chip = {
2017 .irq_mask = its_mask_irq,
2018 .irq_unmask = its_unmask_irq,
2019 .irq_eoi = irq_chip_eoi_parent,
2020 .irq_set_affinity = its_set_affinity,
2021 .irq_compose_msi_msg = its_irq_compose_msi_msg,
2022 .irq_set_irqchip_state = its_irq_set_irqchip_state,
2023 .irq_retrigger = its_irq_retrigger,
2024 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
2029 * How we allocate LPIs:
2031 * lpi_range_list contains ranges of LPIs that are available to
2032 * allocate from. To allocate LPIs, just pick the first range that
2033 * fits the required allocation, and reduce it by the required
2034 * amount. Once empty, remove the range from the list.
2036 * To free a range of LPIs, add a free range to the list, sort it and
2037 * merge the result if the new range happens to be adjacent to an
2038 * already free block.
2040 * The consequence of the above is that the allocation cost is low, but
2041 * freeing is expensive. We assume that freeing rarely occurs.
2043 #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
2045 static DEFINE_MUTEX(lpi_range_lock);
2046 static LIST_HEAD(lpi_range_list);
2049 struct list_head entry;
2054 static struct lpi_range *mk_lpi_range(u32 base, u32 span)
2056 struct lpi_range *range;
2058 range = kmalloc(sizeof(*range), GFP_KERNEL);
2060 range->base_id = base;
2067 static int alloc_lpi_range(u32 nr_lpis, u32 *base)
2069 struct lpi_range *range, *tmp;
2072 mutex_lock(&lpi_range_lock);
2074 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
2075 if (range->span >= nr_lpis) {
2076 *base = range->base_id;
2077 range->base_id += nr_lpis;
2078 range->span -= nr_lpis;
2080 if (range->span == 0) {
2081 list_del(&range->entry);
2090 mutex_unlock(&lpi_range_lock);
2092 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
2096 static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
2098 if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
2100 if (a->base_id + a->span != b->base_id)
2102 b->base_id = a->base_id;
2104 list_del(&a->entry);
2108 static int free_lpi_range(u32 base, u32 nr_lpis)
2110 struct lpi_range *new, *old;
2112 new = mk_lpi_range(base, nr_lpis);
2116 mutex_lock(&lpi_range_lock);
2118 list_for_each_entry_reverse(old, &lpi_range_list, entry) {
2119 if (old->base_id < base)
2123 * old is the last element with ->base_id smaller than base,
2124 * so new goes right after it. If there are no elements with
2125 * ->base_id smaller than base, &old->entry ends up pointing
2126 * at the head of the list, and inserting new at the start of
2127 * the list is the right thing to do in that case as well.
2129 list_add(&new->entry, &old->entry);
2131 * Now check if we can merge with the preceding and/or following ranges.
2134 merge_lpi_ranges(old, new);
2135 merge_lpi_ranges(new, list_next_entry(new, entry));
2137 mutex_unlock(&lpi_range_lock);
2141 static int __init its_lpi_init(u32 id_bits)
2143 u32 lpis = (1UL << id_bits) - 8192;
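/*
 * INTIDs below 8192 are SGIs, PPIs, SPIs and reserved ranges, so only
 * IDs from 8192 up to 2^id_bits are available as LPIs.
 */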
2147 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
2149 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
2151 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
2156 * Initializing the allocator is just the same as freeing the
2157 * full range of LPIs.
2159 err = free_lpi_range(8192, lpis);
2160 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
2164 static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
2166 unsigned long *bitmap = NULL;
2170 err = alloc_lpi_range(nr_irqs, base);
2175 } while (nr_irqs > 0);
2183 bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC);
2191 *base = *nr_ids = 0;
2196 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
2198 WARN_ON(free_lpi_range(base, nr_ids));
2199 bitmap_free(bitmap);
2202 static void gic_reset_prop_table(void *va)
2204 /* Priority 0xa0, Group-1, disabled */
2205 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
2207 /* Make sure the GIC will observe the written configuration */
2208 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
2211 static struct page *its_allocate_prop_table(gfp_t gfp_flags)
2213 struct page *prop_page;
2215 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
2219 gic_reset_prop_table(page_address(prop_page));
2224 static void its_free_prop_table(struct page *prop_page)
2226 free_pages((unsigned long)page_address(prop_page),
2227 get_order(LPI_PROPBASE_SZ));
2230 static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
2232 phys_addr_t start, end, addr_end;
2236 * We don't bother checking for a kdump kernel as by
2237 * construction, the LPI tables are out of this kernel's
2240 if (is_kdump_kernel())
2243 addr_end = addr + size - 1;
2245 for_each_reserved_mem_range(i, &start, &end) {
2246 if (addr >= start && addr_end <= end)
2250 /* Not found, not a good sign... */
2251 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
2253 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2257 static int gic_reserve_range(phys_addr_t addr, unsigned long size)
2259 if (efi_enabled(EFI_CONFIG_TABLES))
2260 return efi_mem_reserve_persistent(addr, size);
2265 static int __init its_setup_lpi_prop_table(void)
2267 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
2270 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2271 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
2273 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
2274 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
2277 gic_reset_prop_table(gic_rdists->prop_table_va);
2281 lpi_id_bits = min_t(u32,
2282 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
2283 ITS_MAX_LPI_NRBITS);
2284 page = its_allocate_prop_table(GFP_NOWAIT);
2286 pr_err("Failed to allocate PROPBASE\n");
2290 gic_rdists->prop_table_pa = page_to_phys(page);
2291 gic_rdists->prop_table_va = page_address(page);
2292 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
2296 pr_info("GICv3: using LPI property table @%pa\n",
2297 &gic_rdists->prop_table_pa);
2299 return its_lpi_init(lpi_id_bits);
2302 static const char *its_base_type_string[] = {
2303 [GITS_BASER_TYPE_DEVICE] = "Devices",
2304 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
2305 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
2306 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
2307 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
2308 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
2309 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
2312 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
2314 u32 idx = baser - its->tables;
2316 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
2319 static void its_write_baser(struct its_node *its, struct its_baser *baser,
2322 u32 idx = baser - its->tables;
2324 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
2325 baser->val = its_read_baser(its, baser);
2328 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
2329 u64 cache, u64 shr, u32 order, bool indirect)
2331 u64 val = its_read_baser(its, baser);
2332 u64 esz = GITS_BASER_ENTRY_SIZE(val);
2333 u64 type = GITS_BASER_TYPE(val);
2334 u64 baser_phys, tmp;
2335 u32 alloc_pages, psz;
2340 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
2341 if (alloc_pages > GITS_BASER_PAGES_MAX) {
2342 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
2343 &its->phys_base, its_base_type_string[type],
2344 alloc_pages, GITS_BASER_PAGES_MAX);
2345 alloc_pages = GITS_BASER_PAGES_MAX;
2346 order = get_order(GITS_BASER_PAGES_MAX * psz);
2349 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2353 base = (void *)page_address(page);
2354 baser_phys = virt_to_phys(base);
2356 /* Check if the physical address of the memory is above 48bits */
2357 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
2359 /* 52bit PA is supported only when PageSize=64K */
2360 if (psz != SZ_64K) {
2361 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
2362 free_pages((unsigned long)base, order);
2366 /* Convert 52bit PA to 48bit field */
2367 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
2372 (type << GITS_BASER_TYPE_SHIFT) |
2373 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
2374 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
2379 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
2383 val |= GITS_BASER_PAGE_SIZE_4K;
2386 val |= GITS_BASER_PAGE_SIZE_16K;
2389 val |= GITS_BASER_PAGE_SIZE_64K;
2394 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
2396 its_write_baser(its, baser, val);
2399 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
2401 * Shareability didn't stick. Just use
2402 * whatever the read reported, which is likely
2403 * to be the only thing this ITS
2404 * supports. If that's zero, make it
2405 * non-cacheable as well.
2407 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
2409 cache = GITS_BASER_nC;
2415 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
2416 &its->phys_base, its_base_type_string[type],
2418 free_pages((unsigned long)base, order);
2422 baser->order = order;
2425 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
2427 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
2428 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2429 its_base_type_string[type],
2430 (unsigned long)virt_to_phys(base),
2431 indirect ? "indirect" : "flat", (int)esz,
2432 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
2437 static bool its_parse_indirect_baser(struct its_node *its,
2438 struct its_baser *baser,
2439 u32 *order, u32 ids)
2441 u64 tmp = its_read_baser(its, baser);
2442 u64 type = GITS_BASER_TYPE(tmp);
2443 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
2444 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
2445 u32 new_order = *order;
2446 u32 psz = baser->psz;
2447 bool indirect = false;
2449 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
2450 if ((esz << ids) > (psz * 2)) {
2452 * Find out whether hw supports a single or two-level table by
2453 * reading bit at offset '62' after writing '1' to it.
2455 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2456 indirect = !!(baser->val & GITS_BASER_INDIRECT);
2460 * The size of a lvl2 table is equal to the ITS page size
2461 * ('psz'). To compute the lvl1 table size, subtract the ID bits
2462 * covered by a single lvl2 table from 'ids' (as reported by the
2463 * ITS hardware), and use the lvl1 entry size for the allocation.
2466 ids -= ilog2(psz / (int)esz);
2467 esz = GITS_LVL1_ENTRY_SIZE;
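/*
 * Illustration: with a 64K ITS page and an 8-byte entry size, a single
 * lvl2 table covers 8192 IDs, so ilog2(8192) = 13 bits are removed
 * from 'ids' before sizing the lvl1 table.
 */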
2472 * Allocate as many entries as required to fit the
2473 * range of device IDs that the ITS can grok... The ID
2474 * space being incredibly sparse, this results in a
2475	 * massive waste of memory if the two-level device table
2476	 * feature is not supported by the hardware.
2478 new_order = max_t(u32, get_order(esz << ids), new_order);
2479 if (new_order > MAX_PAGE_ORDER) {
2480 new_order = MAX_PAGE_ORDER;
2481 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
2482 pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
2483 &its->phys_base, its_base_type_string[type],
2484 device_ids(its), ids);
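	/*
	 * Two-level sizing example (illustrative values, not taken from any
	 * particular implementation): with ids = 20, esz = 8 and psz = 64kB,
	 * a level-2 page covers 64kB / 8 = 8192 IDs (13 bits), leaving
	 * 2^(20 - 13) = 128 level-1 entries of GITS_LVL1_ENTRY_SIZE bytes,
	 * i.e. a 1kB level-1 table instead of the 8MB a flat table would need.
	 */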
2492 static u32 compute_common_aff(u64 val)
2496 aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
2497 clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
2499 return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
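	/*
	 * The masking above mirrors the GICR_TYPER.CommonLPIAff encoding:
	 * 0 clears all affinity bits (everything shares the same LPI
	 * configuration), 1 keeps Aff3 only, 2 keeps Aff3.Aff2, and 3
	 * keeps Aff3.Aff2.Aff1.
	 */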
2502 static u32 compute_its_aff(struct its_node *its)
2508 * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
2509	 * the resulting affinity. We then use that to see if this matches our own affinity.
2512 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2513 val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
2514 val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2515 return compute_common_aff(val);
2518 static struct its_node *find_sibling_its(struct its_node *cur_its)
2520 struct its_node *its;
2523 if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
2526 aff = compute_its_aff(cur_its);
2528 list_for_each_entry(its, &its_nodes, entry) {
2531 if (!is_v4_1(its) || its == cur_its)
2534 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2537 if (aff != compute_its_aff(its))
2540 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2541 baser = its->tables[2].val;
2542 if (!(baser & GITS_BASER_VALID))
2551 static void its_free_tables(struct its_node *its)
2555 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2556 if (its->tables[i].base) {
2557 free_pages((unsigned long)its->tables[i].base,
2558 its->tables[i].order);
2559 its->tables[i].base = NULL;
2564 static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
2571 val = its_read_baser(its, baser);
2572 val &= ~GITS_BASER_PAGE_SIZE_MASK;
2576 gpsz = GITS_BASER_PAGE_SIZE_64K;
2579 gpsz = GITS_BASER_PAGE_SIZE_16K;
2583 gpsz = GITS_BASER_PAGE_SIZE_4K;
2587 gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
2589 val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
2590 its_write_baser(its, baser, val);
2592 if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
2612 static int its_alloc_tables(struct its_node *its)
2614 u64 shr = GITS_BASER_InnerShareable;
2615 u64 cache = GITS_BASER_RaWaWb;
2618 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2619 /* erratum 24313: ignore memory access type */
2620 cache = GITS_BASER_nCnB;
2622 if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) {
2623 cache = GITS_BASER_nC;
2627 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2628 struct its_baser *baser = its->tables + i;
2629 u64 val = its_read_baser(its, baser);
2630 u64 type = GITS_BASER_TYPE(val);
2631 bool indirect = false;
2634 if (type == GITS_BASER_TYPE_NONE)
2637 if (its_probe_baser_psz(its, baser)) {
2638 its_free_tables(its);
2642 order = get_order(baser->psz);
2645 case GITS_BASER_TYPE_DEVICE:
2646 indirect = its_parse_indirect_baser(its, baser, &order,
2650 case GITS_BASER_TYPE_VCPU:
2652 struct its_node *sibling;
2655 if ((sibling = find_sibling_its(its))) {
2656 *baser = sibling->tables[2];
2657 its_write_baser(its, baser, baser->val);
2662 indirect = its_parse_indirect_baser(its, baser, &order,
2663 ITS_MAX_VPEID_BITS);
2667 err = its_setup_baser(its, baser, cache, shr, order, indirect);
2669 its_free_tables(its);
2673 /* Update settings which will be used for next BASERn */
2674 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2675 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
2681 static u64 inherit_vpe_l1_table_from_its(void)
2683 struct its_node *its;
2687 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2688 aff = compute_common_aff(val);
2690 list_for_each_entry(its, &its_nodes, entry) {
2696 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2699 if (aff != compute_its_aff(its))
2702 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2703 baser = its->tables[2].val;
2704 if (!(baser & GITS_BASER_VALID))
2707 /* We have a winner! */
2708 gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2710 val = GICR_VPROPBASER_4_1_VALID;
2711 if (baser & GITS_BASER_INDIRECT)
2712 val |= GICR_VPROPBASER_4_1_INDIRECT;
2713 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
2714 FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
2715 switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
2716 case GIC_PAGE_SIZE_64K:
2717 addr = GITS_BASER_ADDR_48_to_52(baser);
2720 addr = baser & GENMASK_ULL(47, 12);
2723 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
2724 if (rdists_support_shareable()) {
2725 val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
2726 FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
2727 val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
2728 FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
2730 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
2738 static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
2744 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2745 aff = compute_common_aff(val);
2747 for_each_possible_cpu(cpu) {
2748 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2750 if (!base || cpu == smp_processor_id())
2753 val = gic_read_typer(base + GICR_TYPER);
2754 if (aff != compute_common_aff(val))
2758 * At this point, we have a victim. This particular CPU
2759 * has already booted, and has an affinity that matches
2760 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2761 * Make sure we don't write the Z bit in that case.
2763 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2764 val &= ~GICR_VPROPBASER_4_1_Z;
2766 gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2767 *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
2775 static bool allocate_vpe_l2_table(int cpu, u32 id)
2777 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2778 unsigned int psz, esz, idx, npg, gpsz;
2783 if (!gic_rdists->has_rvpeid)
2786 /* Skip non-present CPUs */
2790 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2792 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
2793 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2794 npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
2800 case GIC_PAGE_SIZE_4K:
2803 case GIC_PAGE_SIZE_16K:
2806 case GIC_PAGE_SIZE_64K:
2811 /* Don't allow vpe_id that exceeds single, flat table limit */
2812 if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
2813 return (id < (npg * psz / (esz * SZ_8)));
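	/*
	 * Example with hypothetical RD values: psz = 64kB, esz = 2 (16-byte
	 * entries) and npg = 1 give a flat table that covers
	 * 65536 / 16 = 4096 vPE IDs.
	 */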
2815 /* Compute 1st level table index & check if that exceeds table limit */
2816 idx = id >> ilog2(psz / (esz * SZ_8));
2817 if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
2820 table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2822 /* Allocate memory for 2nd level table */
2824 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
2828 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2829 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2830 gic_flush_dcache_to_poc(page_address(page), psz);
2832 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2834 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2835 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2836 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2838 /* Ensure updated table contents are visible to RD hardware */
2845 static int allocate_vpe_l1_table(void)
2847 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2848 u64 val, gpsz, npg, pa;
2849 unsigned int psz = SZ_64K;
2850 unsigned int np, epp, esz;
2853 if (!gic_rdists->has_rvpeid)
2857	 * If VPENDBASER.Valid is set, disable any previously programmed
2858 * VPE by setting PendingLast while clearing Valid. This has the
2859 * effect of making sure no doorbell will be generated and we can
2860 * then safely clear VPROPBASER.Valid.
2862 if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
2863 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
2864 vlpi_base + GICR_VPENDBASER);
2867 * If we can inherit the configuration from another RD, let's do
2868 * so. Otherwise, we have to go through the allocation process. We
2869 * assume that all RDs have the exact same requirements, as
2870 * nothing will work otherwise.
2872 val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
2873 if (val & GICR_VPROPBASER_4_1_VALID)
2876 gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
2877 if (!gic_data_rdist()->vpe_table_mask)
2880 val = inherit_vpe_l1_table_from_its();
2881 if (val & GICR_VPROPBASER_4_1_VALID)
2884 /* First probe the page size */
2885 val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
2886 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2887 val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
2888 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2889 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
2893 gpsz = GIC_PAGE_SIZE_4K;
2895 case GIC_PAGE_SIZE_4K:
2898 case GIC_PAGE_SIZE_16K:
2901 case GIC_PAGE_SIZE_64K:
2907 * Start populating the register from scratch, including RO fields
2908 * (which we want to print in debug cases...)
2911 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
2912 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
2914 /* How many entries per GIC page? */
2916 epp = psz / (esz * SZ_8);
2919 * If we need more than just a single L1 page, flag the table
2920 * as indirect and compute the number of required L1 pages.
2922 if (epp < ITS_MAX_VPEID) {
2925 val |= GICR_VPROPBASER_4_1_INDIRECT;
2927 /* Number of L2 pages required to cover the VPEID space */
2928 nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
2930 /* Number of L1 pages to point to the L2 pages */
2931 npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
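		/*
		 * Worked example (illustrative): with ITS_MAX_VPEID = 65536,
		 * psz = 64kB and esz = 2 (16-byte entries), epp = 4096, so
		 * nl2 = DIV_ROUND_UP(65536, 4096) = 16 level-2 pages and
		 * npg = DIV_ROUND_UP(16 * 8, 65536) = 1 level-1 page.
		 */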
2936 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
2938 /* Right, that's the number of CPU pages we need for L1 */
2939 np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
2941 pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
2942 np, npg, psz, epp, esz);
2943 page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
2947 gic_data_rdist()->vpe_l1_base = page_address(page);
2948 pa = virt_to_phys(page_address(page));
2949 WARN_ON(!IS_ALIGNED(pa, psz));
2951 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
2952 if (rdists_support_shareable()) {
2953 val |= GICR_VPROPBASER_RaWb;
2954 val |= GICR_VPROPBASER_InnerShareable;
2956 val |= GICR_VPROPBASER_4_1_Z;
2957 val |= GICR_VPROPBASER_4_1_VALID;
2960 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2961 cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
2963 pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
2964 smp_processor_id(), val,
2965 cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
2970 static int its_alloc_collections(struct its_node *its)
2974 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2976 if (!its->collections)
2979 for (i = 0; i < nr_cpu_ids; i++)
2980 its->collections[i].target_address = ~0ULL;
2985 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2987 struct page *pend_page;
2989 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
2990 get_order(LPI_PENDBASE_SZ));
2994 /* Make sure the GIC will observe the zero-ed page */
2995 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
3000 static void its_free_pending_table(struct page *pt)
3002 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
3006 * Booting with kdump and LPIs enabled is generally fine. Any other
3007 * case is wrong in the absence of firmware/EFI support.
3009 static bool enabled_lpis_allowed(void)
3014 /* Check whether the property table is in a reserved region */
3015 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
3016 addr = val & GENMASK_ULL(51, 12);
3018 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
3021 static int __init allocate_lpi_tables(void)
3027 * If LPIs are enabled while we run this from the boot CPU,
3028 * flag the RD tables as pre-allocated if the stars do align.
3030 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
3031 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
3032 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
3033 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
3034 pr_info("GICv3: Using preallocated redistributor tables\n");
3037 err = its_setup_lpi_prop_table();
3042 * We allocate all the pending tables anyway, as we may have a
3043 * mix of RDs that have had LPIs enabled, and some that
3044 * don't. We'll free the unused ones as each CPU comes online.
3046 for_each_possible_cpu(cpu) {
3047 struct page *pend_page;
3049 pend_page = its_allocate_pending_table(GFP_NOWAIT);
3051 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
3055 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
3061 static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
3063 u32 count = 1000000; /* 1s! */
3068 val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
3069 clean = !(val & GICR_VPENDBASER_Dirty);
3075 } while (!clean && count);
3077 if (unlikely(!clean))
3078 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
3083 static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
3087 /* Make sure we wait until the RD is done with the initial scan */
3088 val = read_vpend_dirty_clear(vlpi_base);
3089 val &= ~GICR_VPENDBASER_Valid;
3092 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3094 val = read_vpend_dirty_clear(vlpi_base);
3095 if (unlikely(val & GICR_VPENDBASER_Dirty))
3096 val |= GICR_VPENDBASER_PendingLast;
3101 static void its_cpu_init_lpis(void)
3103 void __iomem *rbase = gic_data_rdist_rd_base();
3104 struct page *pend_page;
3108 if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED)
3111 val = readl_relaxed(rbase + GICR_CTLR);
3112 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
3113 (val & GICR_CTLR_ENABLE_LPIS)) {
3115 * Check that we get the same property table on all
3116 * RDs. If we don't, this is hopeless.
3118 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
3119 paddr &= GENMASK_ULL(51, 12);
3120 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
3121 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3123 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3124 paddr &= GENMASK_ULL(51, 16);
3126 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
3127 gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED;
3132 pend_page = gic_data_rdist()->pend_page;
3133 paddr = page_to_phys(pend_page);
3136 val = (gic_rdists->prop_table_pa |
3137 GICR_PROPBASER_InnerShareable |
3138 GICR_PROPBASER_RaWaWb |
3139 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
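	/*
	 * Example encoding (assuming lpi_id_bits = 16): the IDbits field is
	 * written as 15, the GIC then accepts INTIDs up to 65535, and the
	 * property table covers LPIs 8192-65535 with one config byte each.
	 */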
3141 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3142 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
3144 if (!rdists_support_shareable())
3145 tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
3147 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
3148 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
3150 * The HW reports non-shareable, we must
3151			 * remove the cacheability attributes as well.
3154 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
3155 GICR_PROPBASER_CACHEABILITY_MASK);
3156 val |= GICR_PROPBASER_nC;
3157 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3159 pr_info_once("GIC: using cache flushing for LPI property table\n");
3160 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
3164 val = (page_to_phys(pend_page) |
3165 GICR_PENDBASER_InnerShareable |
3166 GICR_PENDBASER_RaWaWb);
3168 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3169 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3171 if (!rdists_support_shareable())
3172 tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
3174 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
3176 * The HW reports non-shareable, we must remove the
3177 * cacheability attributes as well.
3179 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
3180 GICR_PENDBASER_CACHEABILITY_MASK);
3181 val |= GICR_PENDBASER_nC;
3182 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3186 val = readl_relaxed(rbase + GICR_CTLR);
3187 val |= GICR_CTLR_ENABLE_LPIS;
3188 writel_relaxed(val, rbase + GICR_CTLR);
3191 if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
3192 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3195		 * It's possible for a CPU to receive VLPIs before it is
3196		 * scheduled as a vPE, especially for the first CPU, and a
3197		 * VLPI with an INTID larger than 2^(IDbits+1) will be
3198		 * considered out of range and dropped by the GIC.
3199		 * So initialize IDbits to a known value to avoid VLPI drops.
3201 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3202 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
3203 smp_processor_id(), val);
3204 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3207		 * Also clear the Valid bit of GICR_VPENDBASER, in case some
3208		 * stale programming was left behind and could end up
3209		 * corrupting memory.
3211 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3214 if (allocate_vpe_l1_table()) {
3216 * If the allocation has failed, we're in massive trouble.
3217 * Disable direct injection, and pray that no VM was
3218 * already running...
3220 gic_rdists->has_rvpeid = false;
3221 gic_rdists->has_vlpis = false;
3224 /* Make sure the GIC has seen the above */
3226 gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
3227 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
3229 gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?
3230 "reserved" : "allocated",
3234 static void its_cpu_init_collection(struct its_node *its)
3236 int cpu = smp_processor_id();
3239	/* Avoid cross-node collections and their mapping */
3240 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3241 struct device_node *cpu_node;
3243 cpu_node = of_get_cpu_node(cpu, NULL);
3244 if (its->numa_node != NUMA_NO_NODE &&
3245 its->numa_node != of_node_to_nid(cpu_node))
3250 * We now have to bind each collection to its target
3253 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
3255		 * This ITS wants the physical address of the redistributor.
3258 target = gic_data_rdist()->phys_base;
3260 /* This ITS wants a linear CPU number. */
3261 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
3262 target = GICR_TYPER_CPU_NUMBER(target) << 16;
3265 /* Perform collection mapping */
3266 its->collections[cpu].target_address = target;
3267 its->collections[cpu].col_id = cpu;
3269 its_send_mapc(its, &its->collections[cpu], 1);
3270 its_send_invall(its, &its->collections[cpu]);
3273 static void its_cpu_init_collections(void)
3275 struct its_node *its;
3277 raw_spin_lock(&its_lock);
3279 list_for_each_entry(its, &its_nodes, entry)
3280 its_cpu_init_collection(its);
3282 raw_spin_unlock(&its_lock);
3285 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
3287 struct its_device *its_dev = NULL, *tmp;
3288 unsigned long flags;
3290 raw_spin_lock_irqsave(&its->lock, flags);
3292 list_for_each_entry(tmp, &its->its_device_list, entry) {
3293 if (tmp->device_id == dev_id) {
3299 raw_spin_unlock_irqrestore(&its->lock, flags);
3304 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
3308 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3309 if (GITS_BASER_TYPE(its->tables[i].val) == type)
3310 return &its->tables[i];
3316 static bool its_alloc_table_entry(struct its_node *its,
3317 struct its_baser *baser, u32 id)
3323 /* Don't allow device id that exceeds single, flat table limit */
3324 esz = GITS_BASER_ENTRY_SIZE(baser->val);
3325 if (!(baser->val & GITS_BASER_INDIRECT))
3326 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
3328 /* Compute 1st level table index & check if that exceeds table limit */
3329 idx = id >> ilog2(baser->psz / esz);
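	/*
	 * For example (hypothetical values): with psz = 64kB and esz = 8,
	 * each level-2 page covers 8192 DeviceIDs, so DeviceID 20000 lands
	 * in level-1 slot 20000 >> 13 = 2.
	 */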
3330 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
3333 table = baser->base;
3335 /* Allocate memory for 2nd level table */
3337 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3338 get_order(baser->psz));
3342 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
3343 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3344 gic_flush_dcache_to_poc(page_address(page), baser->psz);
3346 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
3348 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
3349 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3350 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
3352 /* Ensure updated table contents are visible to ITS hardware */
3359 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3361 struct its_baser *baser;
3363 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3365 /* Don't allow device id that exceeds ITS hardware limit */
3367 return (ilog2(dev_id) < device_ids(its));
3369 return its_alloc_table_entry(its, baser, dev_id);
3372 static bool its_alloc_vpe_table(u32 vpe_id)
3374 struct its_node *its;
3378 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
3379 * could try and only do it on ITSs corresponding to devices
3380 * that have interrupts targeted at this VPE, but the
3381	 * complexity becomes crazy (and you have tons of memory anyway).
3384 list_for_each_entry(its, &its_nodes, entry) {
3385 struct its_baser *baser;
3390 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3394 if (!its_alloc_table_entry(its, baser, vpe_id))
3398	/* Not v4.1? No need to iterate the RDs; return early. */
3399 if (!gic_rdists->has_rvpeid)
3403 * Make sure the L2 tables are allocated for all copies of
3404 * the L1 table on *all* v4.1 RDs.
3406 for_each_possible_cpu(cpu) {
3407 if (!allocate_vpe_l2_table(cpu, vpe_id))
3414 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3415 int nvecs, bool alloc_lpis)
3417 struct its_device *dev;
3418 unsigned long *lpi_map = NULL;
3419 unsigned long flags;
3420 u16 *col_map = NULL;
3427 if (!its_alloc_device_table(its, dev_id))
3430 if (WARN_ON(!is_power_of_2(nvecs)))
3431 nvecs = roundup_pow_of_two(nvecs);
3433 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3435 * Even if the device wants a single LPI, the ITT must be
3436 * sized as a power of two (and you need at least one bit...).
3438 nr_ites = max(2, nvecs);
3439 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3440 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
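	/*
	 * Sizing example (assuming an 8-byte ITT entry, which is
	 * implementation-defined): a single requested LPI gives nr_ites = 2
	 * and sz = 16; the max()/rounding above turns that into a 511-byte
	 * allocation so that a 256-byte aligned ITT always fits inside it.
	 */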
3441 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
3443 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
3445 col_map = kcalloc(nr_lpis, sizeof(*col_map),
3448 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
3453 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
3456 bitmap_free(lpi_map);
3461 gic_flush_dcache_to_poc(itt, sz);
3465 dev->nr_ites = nr_ites;
3466 dev->event_map.lpi_map = lpi_map;
3467 dev->event_map.col_map = col_map;
3468 dev->event_map.lpi_base = lpi_base;
3469 dev->event_map.nr_lpis = nr_lpis;
3470 raw_spin_lock_init(&dev->event_map.vlpi_lock);
3471 dev->device_id = dev_id;
3472 INIT_LIST_HEAD(&dev->entry);
3474 raw_spin_lock_irqsave(&its->lock, flags);
3475 list_add(&dev->entry, &its->its_device_list);
3476 raw_spin_unlock_irqrestore(&its->lock, flags);
3478 /* Map device to its ITT */
3479 its_send_mapd(dev, 1);
3484 static void its_free_device(struct its_device *its_dev)
3486 unsigned long flags;
3488 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3489 list_del(&its_dev->entry);
3490 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3491 kfree(its_dev->event_map.col_map);
3492 kfree(its_dev->itt);
3496 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
3500 /* Find a free LPI region in lpi_map and allocate them. */
3501 idx = bitmap_find_free_region(dev->event_map.lpi_map,
3502 dev->event_map.nr_lpis,
3503 get_count_order(nvecs));
3507 *hwirq = dev->event_map.lpi_base + idx;
3512 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
3513 int nvec, msi_alloc_info_t *info)
3515 struct its_node *its;
3516 struct its_device *its_dev;
3517 struct msi_domain_info *msi_info;
3522 * We ignore "dev" entirely, and rely on the dev_id that has
3523 * been passed via the scratchpad. This limits this domain's
3524 * usefulness to upper layers that definitely know that they
3525 * are built on top of the ITS.
3527 dev_id = info->scratchpad[0].ul;
3529 msi_info = msi_get_domain_info(domain);
3530 its = msi_info->data;
3532 if (!gic_rdists->has_direct_lpi &&
3534 vpe_proxy.dev->its == its &&
3535 dev_id == vpe_proxy.dev->device_id) {
3536 /* Bad luck. Get yourself a better implementation */
3537 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
3542 mutex_lock(&its->dev_alloc_lock);
3543 its_dev = its_find_device(its, dev_id);
3546 * We already have seen this ID, probably through
3547 * another alias (PCI bridge of some sort). No need to
3548 * create the device.
3550 its_dev->shared = true;
3551 pr_debug("Reusing ITT for devID %x\n", dev_id);
3555 its_dev = its_create_device(its, dev_id, nvec, true);
3561 if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE)
3562 its_dev->shared = true;
3564 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
3566 mutex_unlock(&its->dev_alloc_lock);
3567 info->scratchpad[0].ptr = its_dev;
3571 static struct msi_domain_ops its_msi_domain_ops = {
3572 .msi_prepare = its_msi_prepare,
3575 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
3577 irq_hw_number_t hwirq)
3579 struct irq_fwspec fwspec;
3581 if (irq_domain_get_of_node(domain->parent)) {
3582 fwspec.fwnode = domain->parent->fwnode;
3583 fwspec.param_count = 3;
3584 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
3585 fwspec.param[1] = hwirq;
3586 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
3587 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
3588 fwspec.fwnode = domain->parent->fwnode;
3589 fwspec.param_count = 2;
3590 fwspec.param[0] = hwirq;
3591 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
3596 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
3599 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3600 unsigned int nr_irqs, void *args)
3602 msi_alloc_info_t *info = args;
3603 struct its_device *its_dev = info->scratchpad[0].ptr;
3604 struct its_node *its = its_dev->its;
3605 struct irq_data *irqd;
3606 irq_hw_number_t hwirq;
3610 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
3614 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3618 for (i = 0; i < nr_irqs; i++) {
3619 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
3623 irq_domain_set_hwirq_and_chip(domain, virq + i,
3624 hwirq + i, &its_irq_chip, its_dev);
3625 irqd = irq_get_irq_data(virq + i);
3626 irqd_set_single_target(irqd);
3627 irqd_set_affinity_on_activate(irqd);
3628 irqd_set_resend_when_in_progress(irqd);
3629 pr_debug("ID:%d pID:%d vID:%d\n",
3630 (int)(hwirq + i - its_dev->event_map.lpi_base),
3631 (int)(hwirq + i), virq + i);
3637 static int its_irq_domain_activate(struct irq_domain *domain,
3638 struct irq_data *d, bool reserve)
3640 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3641 u32 event = its_get_event_id(d);
3644 cpu = its_select_cpu(d, cpu_online_mask);
3645 if (cpu < 0 || cpu >= nr_cpu_ids)
3648 its_inc_lpi_count(d, cpu);
3649 its_dev->event_map.col_map[event] = cpu;
3650 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3652 /* Map the GIC IRQ and event to the device */
3653 its_send_mapti(its_dev, d->hwirq, event);
3657 static void its_irq_domain_deactivate(struct irq_domain *domain,
3660 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3661 u32 event = its_get_event_id(d);
3663 its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
3664 /* Stop the delivery of interrupts */
3665 its_send_discard(its_dev, event);
3668 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
3669 unsigned int nr_irqs)
3671 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
3672 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3673 struct its_node *its = its_dev->its;
3676 bitmap_release_region(its_dev->event_map.lpi_map,
3677 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
3678 get_count_order(nr_irqs));
3680 for (i = 0; i < nr_irqs; i++) {
3681 struct irq_data *data = irq_domain_get_irq_data(domain,
3683 /* Nuke the entry in the domain */
3684 irq_domain_reset_irq_data(data);
3687 mutex_lock(&its->dev_alloc_lock);
3690 * If all interrupts have been freed, start mopping the
3691 * floor. This is conditioned on the device not being shared.
3693 if (!its_dev->shared &&
3694 bitmap_empty(its_dev->event_map.lpi_map,
3695 its_dev->event_map.nr_lpis)) {
3696 its_lpi_free(its_dev->event_map.lpi_map,
3697 its_dev->event_map.lpi_base,
3698 its_dev->event_map.nr_lpis);
3700 /* Unmap device/itt */
3701 its_send_mapd(its_dev, 0);
3702 its_free_device(its_dev);
3705 mutex_unlock(&its->dev_alloc_lock);
3707 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3710 static const struct irq_domain_ops its_domain_ops = {
3711 .alloc = its_irq_domain_alloc,
3712 .free = its_irq_domain_free,
3713 .activate = its_irq_domain_activate,
3714 .deactivate = its_irq_domain_deactivate,
3720 * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
3721 * likely), the only way to perform an invalidate is to use a fake
3722 * device to issue an INV command, implying that the LPI has first
3723 * been mapped to some event on that device. Since this is not exactly
3724 * cheap, we try to keep that mapping around as long as possible, and
3725 * only issue an UNMAP if we're short on available slots.
3727 * Broken by design(tm).
3729 * GICv4.1, on the other hand, mandates that we're able to invalidate
3730 * by writing to a MMIO register. It doesn't implement the whole of
3731 * DirectLPI, but that's good enough. And most of the time, we don't
3732 * even have to invalidate anything, as the redistributor can be told
3733	 * whether to generate a doorbell or not (we thus leave it enabled, always).
3736 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3738 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3739 if (gic_rdists->has_rvpeid)
3742 /* Already unmapped? */
3743 if (vpe->vpe_proxy_event == -1)
3746 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3747 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3750 * We don't track empty slots at all, so let's move the
3751 * next_victim pointer if we can quickly reuse that slot
3752 * instead of nuking an existing entry. Not clear that this is
3753 * always a win though, and this might just generate a ripple
3754 * effect... Let's just hope VPEs don't migrate too often.
3756 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3757 vpe_proxy.next_victim = vpe->vpe_proxy_event;
3759 vpe->vpe_proxy_event = -1;
3762 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3764 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3765 if (gic_rdists->has_rvpeid)
3768 if (!gic_rdists->has_direct_lpi) {
3769 unsigned long flags;
3771 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3772 its_vpe_db_proxy_unmap_locked(vpe);
3773 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3777 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3779 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3780 if (gic_rdists->has_rvpeid)
3783 /* Already mapped? */
3784 if (vpe->vpe_proxy_event != -1)
3787 /* This slot was already allocated. Kick the other VPE out. */
3788 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3789 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
3791 /* Map the new VPE instead */
3792 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3793 vpe->vpe_proxy_event = vpe_proxy.next_victim;
3794 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
3796 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3797 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3800 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3802 unsigned long flags;
3803 struct its_collection *target_col;
3805 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3806 if (gic_rdists->has_rvpeid)
3809 if (gic_rdists->has_direct_lpi) {
3810 void __iomem *rdbase;
3812 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
3813 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3814 wait_for_syncr(rdbase);
3819 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3821 its_vpe_db_proxy_map_locked(vpe);
3823 target_col = &vpe_proxy.dev->its->collections[to];
3824 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3825 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3827 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3830 static int its_vpe_set_affinity(struct irq_data *d,
3831 const struct cpumask *mask_val,
3834 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3835 struct cpumask common, *table_mask;
3836 unsigned long flags;
3840 * Changing affinity is mega expensive, so let's be as lazy as
3841 * we can and only do it if we really have to. Also, if mapped
3842 * into the proxy device, we need to move the doorbell
3843 * interrupt to its new location.
3845 * Another thing is that changing the affinity of a vPE affects
3846 * *other interrupts* such as all the vLPIs that are routed to
3847 * this vPE. This means that the irq_desc lock is not enough to
3848 * protect us, and that we must ensure nobody samples vpe->col_idx
3849 * during the update, hence the lock below which must also be
3850 * taken on any vLPI handling path that evaluates vpe->col_idx.
3852 from = vpe_to_cpuid_lock(vpe, &flags);
3853 table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
3856 * If we are offered another CPU in the same GICv4.1 ITS
3857 * affinity, pick this one. Otherwise, any CPU will do.
3859 if (table_mask && cpumask_and(&common, mask_val, table_mask))
3860 cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common);
3862 cpu = cpumask_first(mask_val);
3869 its_send_vmovp(vpe);
3870 its_vpe_db_proxy_move(vpe, from, cpu);
3873 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3874 vpe_to_cpuid_unlock(vpe, flags);
3876 return IRQ_SET_MASK_OK_DONE;
3879 static void its_wait_vpt_parse_complete(void)
3881 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3884 if (!gic_rdists->has_vpend_valid_dirty)
3887 WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
3889 !(val & GICR_VPENDBASER_Dirty),
3893 static void its_vpe_schedule(struct its_vpe *vpe)
3895 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3898 /* Schedule the VPE */
3899 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
3900 GENMASK_ULL(51, 12);
3901 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3902 if (rdists_support_shareable()) {
3903 val |= GICR_VPROPBASER_RaWb;
3904 val |= GICR_VPROPBASER_InnerShareable;
3906 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3908 val = virt_to_phys(page_address(vpe->vpt_page)) &
3909 GENMASK_ULL(51, 16);
3910 if (rdists_support_shareable()) {
3911 val |= GICR_VPENDBASER_RaWaWb;
3912 val |= GICR_VPENDBASER_InnerShareable;
3915 * There is no good way of finding out if the pending table is
3916 * empty as we can race against the doorbell interrupt very
3917 * easily. So in the end, vpe->pending_last is only an
3918 * indication that the vcpu has something pending, not one
3919 * that the pending table is empty. A good implementation
3920 * would be able to read its coarse map pretty quickly anyway,
3921 * making this a tolerable issue.
3923 val |= GICR_VPENDBASER_PendingLast;
3924 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3925 val |= GICR_VPENDBASER_Valid;
3926 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3929 static void its_vpe_deschedule(struct its_vpe *vpe)
3931 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3934 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3936 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3937 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3940 static void its_vpe_invall(struct its_vpe *vpe)
3942 struct its_node *its;
3944 list_for_each_entry(its, &its_nodes, entry) {
3948 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3952 * Sending a VINVALL to a single ITS is enough, as all
3953 * we need is to reach the redistributors.
3955 its_send_vinvall(its, vpe);
3960 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3962 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3963 struct its_cmd_info *info = vcpu_info;
3965 switch (info->cmd_type) {
3967 its_vpe_schedule(vpe);
3970 case DESCHEDULE_VPE:
3971 its_vpe_deschedule(vpe);
3975 its_wait_vpt_parse_complete();
3979 its_vpe_invall(vpe);
3987 static void its_vpe_send_cmd(struct its_vpe *vpe,
3988 void (*cmd)(struct its_device *, u32))
3990 unsigned long flags;
3992 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3994 its_vpe_db_proxy_map_locked(vpe);
3995 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
3997 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
4000 static void its_vpe_send_inv(struct irq_data *d)
4002 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4004 if (gic_rdists->has_direct_lpi)
4005 __direct_lpi_inv(d, d->parent_data->hwirq);
4007 its_vpe_send_cmd(vpe, its_send_inv);
4010 static void its_vpe_mask_irq(struct irq_data *d)
4013	 * We need to mask the LPI, which is described by the parent
4014	 * irq_data. Instead of calling into the parent (which won't
4015	 * exactly do the right thing), let's simply use the
4016	 * parent_data pointer. Yes, I'm naughty.
4018 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
4019 its_vpe_send_inv(d);
4022 static void its_vpe_unmask_irq(struct irq_data *d)
4024 /* Same hack as above... */
4025 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
4026 its_vpe_send_inv(d);
4029 static int its_vpe_set_irqchip_state(struct irq_data *d,
4030 enum irqchip_irq_state which,
4033 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4035 if (which != IRQCHIP_STATE_PENDING)
4038 if (gic_rdists->has_direct_lpi) {
4039 void __iomem *rdbase;
4041 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
4043 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
4045 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
4046 wait_for_syncr(rdbase);
4050 its_vpe_send_cmd(vpe, its_send_int);
4052 its_vpe_send_cmd(vpe, its_send_clear);
4058 static int its_vpe_retrigger(struct irq_data *d)
4060 return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
4063 static struct irq_chip its_vpe_irq_chip = {
4064 .name = "GICv4-vpe",
4065 .irq_mask = its_vpe_mask_irq,
4066 .irq_unmask = its_vpe_unmask_irq,
4067 .irq_eoi = irq_chip_eoi_parent,
4068 .irq_set_affinity = its_vpe_set_affinity,
4069 .irq_retrigger = its_vpe_retrigger,
4070 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
4071 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
4074 static struct its_node *find_4_1_its(void)
4076 static struct its_node *its = NULL;
4079 list_for_each_entry(its, &its_nodes, entry) {
4091 static void its_vpe_4_1_send_inv(struct irq_data *d)
4093 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4094 struct its_node *its;
4097 * GICv4.1 wants doorbells to be invalidated using the
4098 * INVDB command in order to be broadcast to all RDs. Send
4099 * it to the first valid ITS, and let the HW do its magic.
4101 its = find_4_1_its();
4103 its_send_invdb(its, vpe);
4106 static void its_vpe_4_1_mask_irq(struct irq_data *d)
4108 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
4109 its_vpe_4_1_send_inv(d);
4112 static void its_vpe_4_1_unmask_irq(struct irq_data *d)
4114 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
4115 its_vpe_4_1_send_inv(d);
4118 static void its_vpe_4_1_schedule(struct its_vpe *vpe,
4119 struct its_cmd_info *info)
4121 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4124 /* Schedule the VPE */
4125 val |= GICR_VPENDBASER_Valid;
4126 val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
4127 val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
4128 val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
4130 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
4133 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
4134 struct its_cmd_info *info)
4136 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4140 unsigned long flags;
4143 * vPE is going to block: make the vPE non-resident with
4144 * PendingLast clear and DB set. The GIC guarantees that if
4145 * we read-back PendingLast clear, then a doorbell will be
4146 * delivered when an interrupt comes.
4148 * Note the locking to deal with the concurrent update of
4149		 * pending_last from the doorbell interrupt handler that can run concurrently with us.
4152 raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
4153 val = its_clear_vpend_valid(vlpi_base,
4154 GICR_VPENDBASER_PendingLast,
4155 GICR_VPENDBASER_4_1_DB);
4156 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4157 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
4160 * We're not blocking, so just make the vPE non-resident
4161 * with PendingLast set, indicating that we'll be back.
4163 val = its_clear_vpend_valid(vlpi_base,
4165 GICR_VPENDBASER_PendingLast);
4166 vpe->pending_last = true;
4170 static void its_vpe_4_1_invall(struct its_vpe *vpe)
4172 void __iomem *rdbase;
4173 unsigned long flags;
4177 val = GICR_INVALLR_V;
4178 val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
4180 /* Target the redistributor this vPE is currently known on */
4181 cpu = vpe_to_cpuid_lock(vpe, &flags);
4182 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4183 rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
4184 gic_write_lpir(val, rdbase + GICR_INVALLR);
4186 wait_for_syncr(rdbase);
4187 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4188 vpe_to_cpuid_unlock(vpe, flags);
4191 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4193 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4194 struct its_cmd_info *info = vcpu_info;
4196 switch (info->cmd_type) {
4198 its_vpe_4_1_schedule(vpe, info);
4201 case DESCHEDULE_VPE:
4202 its_vpe_4_1_deschedule(vpe, info);
4206 its_wait_vpt_parse_complete();
4210 its_vpe_4_1_invall(vpe);
4218 static struct irq_chip its_vpe_4_1_irq_chip = {
4219 .name = "GICv4.1-vpe",
4220 .irq_mask = its_vpe_4_1_mask_irq,
4221 .irq_unmask = its_vpe_4_1_unmask_irq,
4222 .irq_eoi = irq_chip_eoi_parent,
4223 .irq_set_affinity = its_vpe_set_affinity,
4224 .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
4227 static void its_configure_sgi(struct irq_data *d, bool clear)
4229 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4230 struct its_cmd_desc desc;
4232 desc.its_vsgi_cmd.vpe = vpe;
4233 desc.its_vsgi_cmd.sgi = d->hwirq;
4234 desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
4235 desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
4236 desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
4237 desc.its_vsgi_cmd.clear = clear;
4240 * GICv4.1 allows us to send VSGI commands to any ITS as long as the
4241 * destination VPE is mapped there. Since we map them eagerly at
4242 * activation time, we're pretty sure the first GICv4.1 ITS will do.
4244 its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
4247 static void its_sgi_mask_irq(struct irq_data *d)
4249 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4251 vpe->sgi_config[d->hwirq].enabled = false;
4252 its_configure_sgi(d, false);
4255 static void its_sgi_unmask_irq(struct irq_data *d)
4257 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4259 vpe->sgi_config[d->hwirq].enabled = true;
4260 its_configure_sgi(d, false);
4263 static int its_sgi_set_affinity(struct irq_data *d,
4264 const struct cpumask *mask_val,
4268 * There is no notion of affinity for virtual SGIs, at least
4269 * not on the host (since they can only be targeting a vPE).
4270 * Tell the kernel we've done whatever it asked for.
4272 irq_data_update_effective_affinity(d, mask_val);
4273 return IRQ_SET_MASK_OK;
4276 static int its_sgi_set_irqchip_state(struct irq_data *d,
4277 enum irqchip_irq_state which,
4280 if (which != IRQCHIP_STATE_PENDING)
4284 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4285 struct its_node *its = find_4_1_its();
4288 val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
4289 val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
4290 writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
4292 its_configure_sgi(d, true);
4298 static int its_sgi_get_irqchip_state(struct irq_data *d,
4299 enum irqchip_irq_state which, bool *val)
4301 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4303 unsigned long flags;
4304 u32 count = 1000000; /* 1s! */
4308 if (which != IRQCHIP_STATE_PENDING)
4312 * Locking galore! We can race against two different events:
4314 * - Concurrent vPE affinity change: we must make sure it cannot
4315 * happen, or we'll talk to the wrong redistributor. This is
4316 * identical to what happens with vLPIs.
4318 * - Concurrent VSGIPENDR access: As it involves accessing two
4319 * MMIO registers, this must be made atomic one way or another.
4321 cpu = vpe_to_cpuid_lock(vpe, &flags);
4322 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4323 base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
4324 writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
4326 status = readl_relaxed(base + GICR_VSGIPENDR);
4327 if (!(status & GICR_VSGIPENDR_BUSY))
4332 pr_err_ratelimited("Unable to get SGI status\n");
4340 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4341 vpe_to_cpuid_unlock(vpe, flags);
4346 *val = !!(status & (1 << d->hwirq));
4351 static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4353 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4354 struct its_cmd_info *info = vcpu_info;
4356 switch (info->cmd_type) {
4357 case PROP_UPDATE_VSGI:
4358 vpe->sgi_config[d->hwirq].priority = info->priority;
4359 vpe->sgi_config[d->hwirq].group = info->group;
4360 its_configure_sgi(d, false);
4368 static struct irq_chip its_sgi_irq_chip = {
4369 .name = "GICv4.1-sgi",
4370 .irq_mask = its_sgi_mask_irq,
4371 .irq_unmask = its_sgi_unmask_irq,
4372 .irq_set_affinity = its_sgi_set_affinity,
4373 .irq_set_irqchip_state = its_sgi_set_irqchip_state,
4374 .irq_get_irqchip_state = its_sgi_get_irqchip_state,
4375 .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity,
4378 static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
4379 unsigned int virq, unsigned int nr_irqs,
4382 struct its_vpe *vpe = args;
4385 /* Yes, we do want 16 SGIs */
4386 WARN_ON(nr_irqs != 16);
4388 for (i = 0; i < 16; i++) {
4389 vpe->sgi_config[i].priority = 0;
4390 vpe->sgi_config[i].enabled = false;
4391 vpe->sgi_config[i].group = false;
4393 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4394 &its_sgi_irq_chip, vpe);
4395 irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
4401 static void its_sgi_irq_domain_free(struct irq_domain *domain,
4403 unsigned int nr_irqs)
4408 static int its_sgi_irq_domain_activate(struct irq_domain *domain,
4409 struct irq_data *d, bool reserve)
4411 /* Write out the initial SGI configuration */
4412 its_configure_sgi(d, false);
4416 static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
4419 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4422 * The VSGI command is awkward:
4424 * - To change the configuration, CLEAR must be set to false,
4425 * leaving the pending bit unchanged.
4426 * - To clear the pending bit, CLEAR must be set to true, leaving
4427 * the configuration unchanged.
4429 * You just can't do both at once, hence the two commands below.
4431 vpe->sgi_config[d->hwirq].enabled = false;
4432 its_configure_sgi(d, false);
4433 its_configure_sgi(d, true);
4436 static const struct irq_domain_ops its_sgi_domain_ops = {
4437 .alloc = its_sgi_irq_domain_alloc,
4438 .free = its_sgi_irq_domain_free,
4439 .activate = its_sgi_irq_domain_activate,
4440 .deactivate = its_sgi_irq_domain_deactivate,
4443 static int its_vpe_id_alloc(void)
4445 return ida_alloc_max(&its_vpeid_ida, ITS_MAX_VPEID - 1, GFP_KERNEL);
4448 static void its_vpe_id_free(u16 id)
4450 ida_free(&its_vpeid_ida, id);
4453 static int its_vpe_init(struct its_vpe *vpe)
4455 struct page *vpt_page;
4458 /* Allocate vpe_id */
4459 vpe_id = its_vpe_id_alloc();
4464 vpt_page = its_allocate_pending_table(GFP_KERNEL);
4466 its_vpe_id_free(vpe_id);
4470 if (!its_alloc_vpe_table(vpe_id)) {
4471 its_vpe_id_free(vpe_id);
4472 its_free_pending_table(vpt_page);
4476 raw_spin_lock_init(&vpe->vpe_lock);
4477 vpe->vpe_id = vpe_id;
4478 vpe->vpt_page = vpt_page;
4479 if (gic_rdists->has_rvpeid)
4480 atomic_set(&vpe->vmapp_count, 0);
4482 vpe->vpe_proxy_event = -1;
4487 static void its_vpe_teardown(struct its_vpe *vpe)
4489 its_vpe_db_proxy_unmap(vpe);
4490 its_vpe_id_free(vpe->vpe_id);
4491 its_free_pending_table(vpe->vpt_page);
4494 static void its_vpe_irq_domain_free(struct irq_domain *domain,
4496 unsigned int nr_irqs)
4498 struct its_vm *vm = domain->host_data;
4501 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
4503 for (i = 0; i < nr_irqs; i++) {
4504 struct irq_data *data = irq_domain_get_irq_data(domain,
4506 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
4508 BUG_ON(vm != vpe->its_vm);
4510 clear_bit(data->hwirq, vm->db_bitmap);
4511 its_vpe_teardown(vpe);
4512 irq_domain_reset_irq_data(data);
4515 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
4516 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
4517 its_free_prop_table(vm->vprop_page);
4521 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
4522 unsigned int nr_irqs, void *args)
4524 struct irq_chip *irqchip = &its_vpe_irq_chip;
4525 struct its_vm *vm = args;
4526 unsigned long *bitmap;
4527 struct page *vprop_page;
4528 int base, nr_ids, i, err = 0;
4532 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
4536 if (nr_ids < nr_irqs) {
4537 its_lpi_free(bitmap, base, nr_ids);
4541 vprop_page = its_allocate_prop_table(GFP_KERNEL);
4543 its_lpi_free(bitmap, base, nr_ids);
4547 vm->db_bitmap = bitmap;
4548 vm->db_lpi_base = base;
4549 vm->nr_db_lpis = nr_ids;
4550 vm->vprop_page = vprop_page;
4552 if (gic_rdists->has_rvpeid)
4553 irqchip = &its_vpe_4_1_irq_chip;
4555 for (i = 0; i < nr_irqs; i++) {
4556 vm->vpes[i]->vpe_db_lpi = base + i;
4557 err = its_vpe_init(vm->vpes[i]);
4560 err = its_irq_gic_domain_alloc(domain, virq + i,
4561 vm->vpes[i]->vpe_db_lpi);
4564 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4565 irqchip, vm->vpes[i]);
4567 irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
4572 its_vpe_irq_domain_free(domain, virq, i);
4574 its_lpi_free(bitmap, base, nr_ids);
4575 its_free_prop_table(vprop_page);
4581 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
4582 struct irq_data *d, bool reserve)
4584 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4585 struct its_node *its;
4588 * If we use the list map, we issue VMAPP on demand... Unless
4589 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
4590 * so that VSGIs can work.
4592 if (!gic_requires_eager_mapping())
4595 /* Map the VPE to the first possible CPU */
4596 vpe->col_idx = cpumask_first(cpu_online_mask);
4598 list_for_each_entry(its, &its_nodes, entry) {
4602 its_send_vmapp(its, vpe, true);
4603 its_send_vinvall(its, vpe);
4606 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
4611 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
4614 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4615 struct its_node *its;
4618 * If we use the list map on GICv4.0, we unmap the VPE once no
4619 * VLPIs are associated with the VM.
4621 if (!gic_requires_eager_mapping())
4624 list_for_each_entry(its, &its_nodes, entry) {
4628 its_send_vmapp(its, vpe, false);
4632	 * There may be a direct read to the VPT after unmapping the
4633	 * vPE. To guarantee the validity of such a read, we make the VPT
4634	 * memory coherent with the CPU caches here.
4636 if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
4637 gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
4641 static const struct irq_domain_ops its_vpe_domain_ops = {
4642 .alloc = its_vpe_irq_domain_alloc,
4643 .free = its_vpe_irq_domain_free,
4644 .activate = its_vpe_irq_domain_activate,
4645 .deactivate = its_vpe_irq_domain_deactivate,
4648 static int its_force_quiescent(void __iomem *base)
4650 u32 count = 1000000; /* 1s */
4653 val = readl_relaxed(base + GITS_CTLR);
4655 * GIC architecture specification requires the ITS to be both
4656 * disabled and quiescent for writes to GITS_BASER<n> or
4657 * GITS_CBASER to not have UNPREDICTABLE results.
4659 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
4662 /* Disable the generation of all interrupts to this ITS */
4663 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
4664 writel_relaxed(val, base + GITS_CTLR);
4666 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
4668 val = readl_relaxed(base + GITS_CTLR);
4669 if (val & GITS_CTLR_QUIESCENT)
4681 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
4683 struct its_node *its = data;
4685 /* erratum 22375: only alloc 8MB table size (20 bits) */
4686 its->typer &= ~GITS_TYPER_DEVBITS;
4687 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
4688 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
4693 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
4695 struct its_node *its = data;
4697 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
4702 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
4704 struct its_node *its = data;
4706 /* On QDF2400, the size of the ITE is 16Bytes */
4707 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
4708 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
4713 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
4715 struct its_node *its = its_dev->its;
4718 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
4719 * which maps 32-bit writes targeted at a separate window of
4720 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
4721 * with device ID taken from bits [device_id_bits + 1:2] of
4722 * the window offset.
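	 * For example (hypothetical DeviceID): a device with DeviceID 0x10
	 * is handed pre_its_base + 0x40 as its MSI doorbell, and the
	 * pre-ITS turns a write there into a GITS_TRANSLATER write tagged
	 * with DeviceID 0x10.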
4724 return its->pre_its_base + (its_dev->device_id << 2);
4727 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
4729 struct its_node *its = data;
4730 u32 pre_its_window[2];
4733 if (!fwnode_property_read_u32_array(its->fwnode_handle,
4734 "socionext,synquacer-pre-its",
4736 ARRAY_SIZE(pre_its_window))) {
4738 its->pre_its_base = pre_its_window[0];
4739 its->get_msi_base = its_irq_get_msi_base_pre_its;
4741 ids = ilog2(pre_its_window[1]) - 2;
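		/*
		 * Example: a 4MB pre-ITS window (pre_its_window[1] = 0x400000)
		 * yields ilog2(0x400000) - 2 = 20 usable DeviceID bits.
		 */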
4742 if (device_ids(its) > ids) {
4743 its->typer &= ~GITS_TYPER_DEVBITS;
4744 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
4747 /* the pre-ITS breaks isolation, so disable MSI remapping */
4748 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI;
4754 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
4756 struct its_node *its = data;
4759 * Hip07 insists on using the wrong address for the VLPI
4760 * page. Trick it into doing the right thing...
4762 its->vlpi_redist_offset = SZ_128K;
4766 static bool __maybe_unused its_enable_rk3588001(void *data)
4768 struct its_node *its = data;
4770 if (!of_machine_is_compatible("rockchip,rk3588") &&
4771 !of_machine_is_compatible("rockchip,rk3588s"))
4774 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
4775 gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
4780 static bool its_set_non_coherent(void *data)
4782 struct its_node *its = data;
4784 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
4788 static const struct gic_quirk its_quirks[] = {
4789 #ifdef CONFIG_CAVIUM_ERRATUM_22375
4791 .desc = "ITS: Cavium errata 22375, 24313",
4792 .iidr = 0xa100034c, /* ThunderX pass 1.x */
4794 .init = its_enable_quirk_cavium_22375,
4797 #ifdef CONFIG_CAVIUM_ERRATUM_23144
4799 .desc = "ITS: Cavium erratum 23144",
4800 .iidr = 0xa100034c, /* ThunderX pass 1.x */
4802 .init = its_enable_quirk_cavium_23144,
4805 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
4807 .desc = "ITS: QDF2400 erratum 0065",
4808 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
4810 .init = its_enable_quirk_qdf2400_e0065,
4813 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
4816 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
4817 * implementation, but with a 'pre-ITS' added that requires
4818 * special handling in software.
4820 .desc = "ITS: Socionext Synquacer pre-ITS",
4823 .init = its_enable_quirk_socionext_synquacer,
4826 #ifdef CONFIG_HISILICON_ERRATUM_161600802
4828 .desc = "ITS: Hip07 erratum 161600802",
4831 .init = its_enable_quirk_hip07_161600802,
4834 #ifdef CONFIG_ROCKCHIP_ERRATUM_3588001
4836 .desc = "ITS: Rockchip erratum RK3588001",
4839 .init = its_enable_rk3588001,
4843 .desc = "ITS: non-coherent attribute",
4844 .property = "dma-noncoherent",
4845 .init = its_set_non_coherent,
4851 static void its_enable_quirks(struct its_node *its)
4853 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
4855 gic_enable_quirks(iidr, its_quirks, its);
4857 if (is_of_node(its->fwnode_handle))
4858 gic_enable_of_quirks(to_of_node(its->fwnode_handle),
4862 static int its_save_disable(void)
4864 struct its_node *its;
4867 raw_spin_lock(&its_lock);
4868 list_for_each_entry(its, &its_nodes, entry) {
4872 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
4873 err = its_force_quiescent(base);
4875 pr_err("ITS@%pa: failed to quiesce: %d\n",
4876 &its->phys_base, err);
4877 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4881 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
4886 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
4890 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4893 raw_spin_unlock(&its_lock);
4898 static void its_restore_enable(void)
4900 struct its_node *its;
4903 raw_spin_lock(&its_lock);
4904 list_for_each_entry(its, &its_nodes, entry) {
4911 * Make sure that the ITS is disabled. If it fails to quiesce,
4912 * don't restore it since writing to CBASER or BASER<n>
4913 * registers is undefined according to the GIC v3 ITS specification.
4916 * Firmware resuming with the ITS enabled is terminally broken.
4918 WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
4919 ret = its_force_quiescent(base);
4921 pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
4922 &its->phys_base, ret);
4926 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
4929 * Writing CBASER resets CREADR to 0, so make CWRITER and
4930 * cmd_write line up with it.
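* Leaving a stale, non-zero CWRITER in place would otherwise make the ITS
* treat everything between CREADR (now 0) and CWRITER as pending commands
* and start executing stale queue contents.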
4932 its->cmd_write = its->cmd_base;
4933 gits_write_cwriter(0, base + GITS_CWRITER);
4935 /* Restore GITS_BASER from the value cache. */
4936 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
4937 struct its_baser *baser = &its->tables[i];
4939 if (!(baser->val & GITS_BASER_VALID))
4942 its_write_baser(its, baser, baser->val);
4944 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4947 * Reinit the collection if it's stored in the ITS. This is
4948 * indicated by the col_id being less than the HCC field.
4949 * CID < HCC as specified in the GIC v3 Documentation.
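* For example, an ITS reporting GITS_TYPER.HCC == 8 holds collections 0-7
* in internal storage; that state is not necessarily preserved across
* suspend, which is why its_cpu_init_collection() is called again below.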
4951 if (its->collections[smp_processor_id()].col_id <
4952 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
4953 its_cpu_init_collection(its);
4955 raw_spin_unlock(&its_lock);
4958 static struct syscore_ops its_syscore_ops = {
4959 .suspend = its_save_disable,
4960 .resume = its_restore_enable,
4963 static void __init __iomem *its_map_one(struct resource *res, int *err)
4965 void __iomem *its_base;
4968 its_base = ioremap(res->start, SZ_64K);
4970 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
4975 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
4976 if (val != 0x30 && val != 0x40) {
4977 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
4982 *err = its_force_quiescent(its_base);
4984 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
4995 static int its_init_domain(struct its_node *its)
4997 struct irq_domain *inner_domain;
4998 struct msi_domain_info *info;
5000 info = kzalloc(sizeof(*info), GFP_KERNEL);
5004 info->ops = &its_msi_domain_ops;
5007 inner_domain = irq_domain_create_hierarchy(its_parent,
5008 its->msi_domain_flags, 0,
5009 its->fwnode_handle, &its_domain_ops,
5011 if (!inner_domain) {
5016 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
5021 static int its_init_vpe_domain(void)
5023 struct its_node *its;
5027 if (gic_rdists->has_direct_lpi) {
5028 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
5032 /* Any ITS will do, even if not v4 */
5033 its = list_first_entry(&its_nodes, struct its_node, entry);
5035 entries = roundup_pow_of_two(nr_cpu_ids);
5036 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
5038 if (!vpe_proxy.vpes)
5041 /* Use the last possible DevID */
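/*
 * With 16 DeviceID bits, for instance, this evaluates to
 * GENMASK(15, 0) = 0xffff, i.e. the highest DeviceID the ITS can decode.
 */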
5042 devid = GENMASK(device_ids(its) - 1, 0);
5043 vpe_proxy.dev = its_create_device(its, devid, entries, false);
5044 if (!vpe_proxy.dev) {
5045 kfree(vpe_proxy.vpes);
5046 pr_err("ITS: Can't allocate GICv4 proxy device\n");
5050 BUG_ON(entries > vpe_proxy.dev->nr_ites);
5052 raw_spin_lock_init(&vpe_proxy.lock);
5053 vpe_proxy.next_victim = 0;
5054 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
5055 devid, vpe_proxy.dev->nr_ites);
5060 static int __init its_compute_its_list_map(struct its_node *its)
5066 * This is assumed to be done early enough that we're
5067 * guaranteed to be single-threaded, hence no
5068 * locking. Should this change, we should address this.
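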
5071 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
5072 if (its_number >= GICv4_ITS_LIST_MAX) {
5073 pr_err("ITS@%pa: No ITSList entry available!\n",
5078 ctlr = readl_relaxed(its->base + GITS_CTLR);
5079 ctlr &= ~GITS_CTLR_ITS_NUMBER;
5080 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
5081 writel_relaxed(ctlr, its->base + GITS_CTLR);
5082 ctlr = readl_relaxed(its->base + GITS_CTLR);
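/*
 * The ITSList number may be read-only on some implementations; if the
 * value written above did not stick, fall back to whatever the ITS
 * reports so that the list map stays in sync with the hardware.
 */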
5083 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
5084 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
5085 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
5088 if (test_and_set_bit(its_number, &its_list_map)) {
5089 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
5090 &its->phys_base, its_number);
5097 static int __init its_probe_one(struct its_node *its)
5104 its_enable_quirks(its);
5107 if (!(its->typer & GITS_TYPER_VMOVP)) {
5108 err = its_compute_its_list_map(its);
5114 pr_info("ITS@%pa: Using ITS number %d\n",
5115 &its->phys_base, err);
5117 pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base);
5121 u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
5123 its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K);
5124 if (!its->sgir_base) {
5129 its->mpidr = readl_relaxed(its->base + GITS_MPIDR);
5131 pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
5132 &its->phys_base, its->mpidr, svpet);
5136 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
5137 get_order(ITS_CMD_QUEUE_SZ));
5140 goto out_unmap_sgir;
5142 its->cmd_base = (void *)page_address(page);
5143 its->cmd_write = its->cmd_base;
5145 err = its_alloc_tables(its);
5149 err = its_alloc_collections(its);
5151 goto out_free_tables;
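/*
 * Program GITS_CBASER with the command queue's physical address and
 * attributes. The low bits hold the queue size as (number of 4KB pages - 1):
 * a 64KB command queue, for example, encodes as 64K / 4K - 1 = 15.
 */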
5153 baser = (virt_to_phys(its->cmd_base) |
5154 GITS_CBASER_RaWaWb |
5155 GITS_CBASER_InnerShareable |
5156 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
5159 gits_write_cbaser(baser, its->base + GITS_CBASER);
5160 tmp = gits_read_cbaser(its->base + GITS_CBASER);
5162 if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
5163 tmp &= ~GITS_CBASER_SHAREABILITY_MASK;
5165 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
5166 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
5168 * The HW reports non-shareable, we must
5169 * remove the cacheability attributes as well.
5172 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
5173 GITS_CBASER_CACHEABILITY_MASK);
5174 baser |= GITS_CBASER_nC;
5175 gits_write_cbaser(baser, its->base + GITS_CBASER);
5177 pr_info("ITS: using cache flushing for cmd queue\n");
5178 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
5181 gits_write_cwriter(0, its->base + GITS_CWRITER);
5182 ctlr = readl_relaxed(its->base + GITS_CTLR);
5183 ctlr |= GITS_CTLR_ENABLE;
5185 ctlr |= GITS_CTLR_ImDe;
5186 writel_relaxed(ctlr, its->base + GITS_CTLR);
5188 err = its_init_domain(its);
5190 goto out_free_tables;
5192 raw_spin_lock(&its_lock);
5193 list_add(&its->entry, &its_nodes);
5194 raw_spin_unlock(&its_lock);
5199 its_free_tables(its);
5201 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
5204 iounmap(its->sgir_base);
5206 pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err);
5210 static bool gic_rdists_supports_plpis(void)
5212 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
5215 static int redist_disable_lpis(void)
5217 void __iomem *rbase = gic_data_rdist_rd_base();
5218 u64 timeout = USEC_PER_SEC;
5221 if (!gic_rdists_supports_plpis()) {
5222 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
5226 val = readl_relaxed(rbase + GICR_CTLR);
5227 if (!(val & GICR_CTLR_ENABLE_LPIS))
5231 * If coming via a CPU hotplug event, we don't need to disable
5232 * LPIs before trying to re-enable them. They are already
5233 * configured and all is well in the world.
5235 * If running with preallocated tables, there is nothing to do.
5237 if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) ||
5238 (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
5242 * From that point on, we only try to do some damage control.
5244 pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
5245 smp_processor_id());
5246 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
5249 val &= ~GICR_CTLR_ENABLE_LPIS;
5250 writel_relaxed(val, rbase + GICR_CTLR);
5252 /* Make sure any change to GICR_CTLR is observable by the GIC */
5256 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
5257 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
5258 * Error out if we time out waiting for RWP to clear.
5260 while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
5262 pr_err("CPU%d: Timeout while disabling LPIs\n",
5263 smp_processor_id());
5271 * After it has been written to 1, it is IMPLEMENTATION
5272 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
5273 * cleared to 0. Error out if clearing the bit failed.
5275 if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
5276 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
5283 int its_cpu_init(void)
5285 if (!list_empty(&its_nodes)) {
5288 ret = redist_disable_lpis();
5292 its_cpu_init_lpis();
5293 its_cpu_init_collections();
5299 static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work)
5301 cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state);
5302 gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
5305 static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work,
5306 rdist_memreserve_cpuhp_cleanup_workfn);
5308 static int its_cpu_memreserve_lpi(unsigned int cpu)
5310 struct page *pend_page;
5313 /* This gets to run exactly once per CPU */
5314 if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE)
5317 pend_page = gic_data_rdist()->pend_page;
5318 if (WARN_ON(!pend_page)) {
5323 * If the pending table was pre-programmed, free the memory we
5324 * preemptively allocated. Otherwise, reserve that memory for later use.
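* (gic_reserve_range() records the pending table in the persistent EFI
* memreserve table, so a subsequently kexec'd kernel won't hand the page
* back to the allocator while the redistributor may still be using it.)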
5327 if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) {
5328 its_free_pending_table(pend_page);
5329 gic_data_rdist()->pend_page = NULL;
5331 phys_addr_t paddr = page_to_phys(pend_page);
5332 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
5336 /* Last CPU being brought up gets to issue the cleanup */
5337 if (!IS_ENABLED(CONFIG_SMP) ||
5338 cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
5339 schedule_work(&rdist_memreserve_cpuhp_cleanup_work);
5341 gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE;
5345 /* Mark all the BASER registers as invalid before they get reprogrammed */
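/*
 * GITS_BASER<n> registers sit 8 bytes apart starting at GITS_BASER, hence
 * the (i << 3) stride below; writing 0 clears GITS_BASER_VALID so that any
 * tables left programmed by firmware or a previous kernel are forgotten
 * before the ITS is probed.
 */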
5346 static int __init its_reset_one(struct resource *res)
5348 void __iomem *its_base;
5351 its_base = its_map_one(res, &err);
5355 for (i = 0; i < GITS_BASER_NR_REGS; i++)
5356 gits_write_baser(0, its_base + GITS_BASER + (i << 3));
5362 static const struct of_device_id its_device_id[] = {
5363 { .compatible = "arm,gic-v3-its", },
5367 static struct its_node __init *its_node_init(struct resource *res,
5368 struct fwnode_handle *handle, int numa_node)
5370 void __iomem *its_base;
5371 struct its_node *its;
5374 its_base = its_map_one(res, &err);
5378 pr_info("ITS %pR\n", res);
5380 its = kzalloc(sizeof(*its), GFP_KERNEL);
5384 raw_spin_lock_init(&its->lock);
5385 mutex_init(&its->dev_alloc_lock);
5386 INIT_LIST_HEAD(&its->entry);
5387 INIT_LIST_HEAD(&its->its_device_list);
5389 its->typer = gic_read_typer(its_base + GITS_TYPER);
5390 its->base = its_base;
5391 its->phys_base = res->start;
5392 its->get_msi_base = its_irq_get_msi_base;
5393 its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;
5395 its->numa_node = numa_node;
5396 its->fwnode_handle = handle;
5405 static void its_node_destroy(struct its_node *its)
5411 static int __init its_of_probe(struct device_node *node)
5413 struct device_node *np;
5414 struct resource res;
5418 * Make sure *all* the ITS are reset before we probe any, as
5419 * they may be sharing memory. If any of the ITS fails to
5420 * reset, don't even try to go any further, as this could
5421 * result in something even worse.
5423 for (np = of_find_matching_node(node, its_device_id); np;
5424 np = of_find_matching_node(np, its_device_id)) {
5425 if (!of_device_is_available(np) ||
5426 !of_property_read_bool(np, "msi-controller") ||
5427 of_address_to_resource(np, 0, &res))
5430 err = its_reset_one(&res);
5435 for (np = of_find_matching_node(node, its_device_id); np;
5436 np = of_find_matching_node(np, its_device_id)) {
5437 struct its_node *its;
5439 if (!of_device_is_available(np))
5441 if (!of_property_read_bool(np, "msi-controller")) {
5442 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
5447 if (of_address_to_resource(np, 0, &res)) {
5448 pr_warn("%pOF: no regs?\n", np);
5453 its = its_node_init(&res, &np->fwnode, of_node_to_nid(np));
5457 err = its_probe_one(its);
5459 its_node_destroy(its);
5468 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
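/*
 * 128KB covers the two architectural 64KB ITS register frames (control
 * registers plus the GITS_TRANSLATER page); the optional GICv4.1 SGI frame
 * that may follow is mapped separately in its_probe_one().
 */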
5470 #ifdef CONFIG_ACPI_NUMA
5471 struct its_srat_map {
5478 static struct its_srat_map *its_srat_maps __initdata;
5479 static int its_in_srat __initdata;
5481 static int __init acpi_get_its_numa_node(u32 its_id)
5485 for (i = 0; i < its_in_srat; i++) {
5486 if (its_id == its_srat_maps[i].its_id)
5487 return its_srat_maps[i].numa_node;
5489 return NUMA_NO_NODE;
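/*
 * No-op matcher: it only needs to return 0 so that
 * acpi_table_parse_entries() counts the GIC ITS affinity entries, letting
 * its_srat_maps be sized before the real parse below.
 */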
5492 static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
5493 const unsigned long end)
5498 static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
5499 const unsigned long end)
5502 struct acpi_srat_gic_its_affinity *its_affinity;
5504 its_affinity = (struct acpi_srat_gic_its_affinity *)header;
5508 if (its_affinity->header.length < sizeof(*its_affinity)) {
5509 pr_err("SRAT: Invalid header length %d in ITS affinity\n",
5510 its_affinity->header.length);
5515 * Note that in theory a new proximity node could be created by this
5516 * entry as it is an SRAT resource allocation structure.
5517 * We do not currently support doing so.
5519 node = pxm_to_node(its_affinity->proximity_domain);
5521 if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
5522 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
5526 its_srat_maps[its_in_srat].numa_node = node;
5527 its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
5529 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
5530 its_affinity->proximity_domain, its_affinity->its_id, node);
5535 static void __init acpi_table_parse_srat_its(void)
5539 count = acpi_table_parse_entries(ACPI_SIG_SRAT,
5540 sizeof(struct acpi_table_srat),
5541 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
5542 gic_acpi_match_srat_its, 0);
5546 its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
5551 acpi_table_parse_entries(ACPI_SIG_SRAT,
5552 sizeof(struct acpi_table_srat),
5553 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
5554 gic_acpi_parse_srat_its, 0);
5557 /* free the its_srat_maps after ITS probing */
5558 static void __init acpi_its_srat_maps_free(void)
5560 kfree(its_srat_maps);
5563 static void __init acpi_table_parse_srat_its(void) { }
5564 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
5565 static void __init acpi_its_srat_maps_free(void) { }
5568 static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
5569 const unsigned long end)
5571 struct acpi_madt_generic_translator *its_entry;
5572 struct fwnode_handle *dom_handle;
5573 struct its_node *its;
5574 struct resource res;
5577 its_entry = (struct acpi_madt_generic_translator *)header;
5578 memset(&res, 0, sizeof(res));
5579 res.start = its_entry->base_address;
5580 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
5581 res.flags = IORESOURCE_MEM;
5583 dom_handle = irq_domain_alloc_fwnode(&res.start);
5585 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
5590 err = iort_register_domain_token(its_entry->translation_id, res.start,
5593 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
5594 &res.start, its_entry->translation_id);
5598 its = its_node_init(&res, dom_handle,
5599 acpi_get_its_numa_node(its_entry->translation_id));
5605 err = its_probe_one(its);
5610 iort_deregister_domain_token(its_entry->translation_id);
5612 irq_domain_free_fwnode(dom_handle);
5616 static int __init its_acpi_reset(union acpi_subtable_headers *header,
5617 const unsigned long end)
5619 struct acpi_madt_generic_translator *its_entry;
5620 struct resource res;
5622 its_entry = (struct acpi_madt_generic_translator *)header;
5623 res = (struct resource) {
5624 .start = its_entry->base_address,
5625 .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1,
5626 .flags = IORESOURCE_MEM,
5629 return its_reset_one(&res);
5632 static void __init its_acpi_probe(void)
5634 acpi_table_parse_srat_its();
5636 * Make sure *all* the ITS are reset before we probe any, as
5637 * they may be sharing memory. If any of the ITS fails to
5638 * reset, don't even try to go any further, as this could
5639 * result in something even worse.
5641 if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
5642 its_acpi_reset, 0) > 0)
5643 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
5644 gic_acpi_parse_madt_its, 0);
5645 acpi_its_srat_maps_free();
5648 static void __init its_acpi_probe(void) { }
5651 int __init its_lpi_memreserve_init(void)
5655 if (!efi_enabled(EFI_CONFIG_TABLES))
5658 if (list_empty(&its_nodes))
5661 gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
5662 state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
5663 "irqchip/arm/gicv3/memreserve:online",
5664 its_cpu_memreserve_lpi,
5669 gic_rdists->cpuhp_memreserve_state = state;
5674 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
5675 struct irq_domain *parent_domain)
5677 struct device_node *of_node;
5678 struct its_node *its;
5679 bool has_v4 = false;
5680 bool has_v4_1 = false;
5683 gic_rdists = rdists;
5685 its_parent = parent_domain;
5686 of_node = to_of_node(handle);
5688 its_of_probe(of_node);
5692 if (list_empty(&its_nodes)) {
5693 pr_warn("ITS: No ITS available, not enabling LPIs\n");
5697 err = allocate_lpi_tables();
5701 list_for_each_entry(its, &its_nodes, entry) {
5702 has_v4 |= is_v4(its);
5703 has_v4_1 |= is_v4_1(its);
5706 /* Don't bother with inconsistent systems */
5707 if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
5708 rdists->has_rvpeid = false;
5710 if (has_v4 && rdists->has_vlpis) {
5711 const struct irq_domain_ops *sgi_ops;
5714 sgi_ops = &its_sgi_domain_ops;
5718 if (its_init_vpe_domain() ||
5719 its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
5720 rdists->has_vlpis = false;
5721 pr_err("ITS: Disabling GICv4 support\n");
5725 register_syscore_ops(&its_syscore_ops);