// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>
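
/*
 * Per-CPU bookkeeping: the arm_pmu (if any) that has been associated with
 * each CPU so far, and the Linux IRQ number parsed from that CPU's MADT
 * GICC entry.
 */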
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);
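
/*
 * Map a CPU's GICC performance interrupt GSI to a Linux IRQ. Returns the
 * IRQ number, 0 when the GICC entry advertises no usable interrupt, or a
 * negative errno on failure.
 */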
static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (WARN_ON(!gicc))
		return -EINVAL;

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}

static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (!gicc)
		return;

	gsi = gicc->performance_interrupt;
	acpi_unregister_gsi(gsi);
}
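
/*
 * Register the performance interrupt of every possible CPU and stash the
 * resulting Linux IRQ in pmu_irqs. On failure, unwind any GSIs registered
 * so far, taking care to unregister each GSI only once (PPIs share a GSI
 * across CPUs).
 */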
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		/*
		 * Log and request the IRQ so the core arm_pmu code can manage
		 * it. We'll have to sanity-check IRQs later when we associate
		 * them with their PMUs.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		armpmu_request_irq(irq, cpu);
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}
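
/*
 * Find the arm_pmu already allocated for another CPU with the same MIDR
 * (read_cpuid_id()), or allocate a fresh one. This runs from the CPU
 * starting hotplug callback, so the allocation must be atomic.
 */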
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;
		return pmu;
	}

	pmu = armpmu_alloc_atomic();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;
	return pmu;
}

/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	int cpu;

	if (!irq)
		return true;

	for_each_cpu(cpu, &pmu->supported_cpus) {
		int other_irq = per_cpu(hw_events->irq, cpu);

		if (!other_irq)
			continue;
		if (irq == other_irq)
			continue;
		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
			continue;

		pr_warn("mismatched PPIs detected\n");
		return false;
	}

	return true;
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage the
 * affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	per_cpu(probed_pmus, cpu) = pmu;

	if (pmu_irq_matches(pmu, irq)) {
		hw_events = pmu->hw_events;
		per_cpu(hw_events->irq, cpu) = irq;
	}

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
	 * arm_pmu_acpi_init().
	 *
	 * So for the time being, we're done.
	 */
	return 0;
}
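
/*
 * Initialise and register an arm_pmu for each group of CPUs probed so far,
 * using the driver-supplied init_fn and giving each instance a unique
 * "%s_%d" name based on the name init_fn assigned.
 */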
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			return ret;
		}
	}

	return 0;
}
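
/*
 * Parse the PMU interrupts from the MADT and install the CPU starting
 * hotplug callback. Nothing to do if ACPI is disabled.
 */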
static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
subsys_initcall(arm_pmu_acpi_init)