/*
 * ARMv8 PMUv3 Performance Events handling code.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <linux/acpi.h>
#include <linux/clocksource.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL				0xC2

/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST			0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS		0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS		0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS		0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS		0xED
/*
 * ARMv8 architecturally defined events; not all of these may
 * be supported on any given implementation. Unsupported events will
 * be disabled at run-time based on the PMCEID registers.
 */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INST_RETIRED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};

static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};

static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,

	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};

static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						 [PERF_COUNT_HW_CACHE_OP_MAX]
						 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static ssize_t
armv8pmu_events_sysfs_show(struct device *dev,
			   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
}
#define ARMV8_EVENT_ATTR_RESOLVE(m) #m
#define ARMV8_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
		       config, armv8pmu_events_sysfs_show)

ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR);
ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL);
ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE);
ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL);
ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED);
ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED);
ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED);
ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN);
ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN);
ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED);
ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED);
ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED);
ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED);
ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED);
ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES);
ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED);
ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS);
ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE);
ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE);
ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB);
ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR);
ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC);
ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED);
ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES);
/* Don't expose the chain event in /sys, since it's useless in isolation */
ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED);
ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND);
ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND);
ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB);
ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB);
ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE);
ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE);
ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL);
ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);
ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS);
ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE);
ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS);
ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK);
ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK);
ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD);
ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD);
ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD);
ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP);
ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED);
ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE);
ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION);

static struct attribute *armv8_pmuv3_event_attrs[] = {
	&armv8_event_attr_sw_incr.attr.attr,
	&armv8_event_attr_l1i_cache_refill.attr.attr,
	&armv8_event_attr_l1i_tlb_refill.attr.attr,
	&armv8_event_attr_l1d_cache_refill.attr.attr,
	&armv8_event_attr_l1d_cache.attr.attr,
	&armv8_event_attr_l1d_tlb_refill.attr.attr,
	&armv8_event_attr_ld_retired.attr.attr,
	&armv8_event_attr_st_retired.attr.attr,
	&armv8_event_attr_inst_retired.attr.attr,
	&armv8_event_attr_exc_taken.attr.attr,
	&armv8_event_attr_exc_return.attr.attr,
	&armv8_event_attr_cid_write_retired.attr.attr,
	&armv8_event_attr_pc_write_retired.attr.attr,
	&armv8_event_attr_br_immed_retired.attr.attr,
	&armv8_event_attr_br_return_retired.attr.attr,
	&armv8_event_attr_unaligned_ldst_retired.attr.attr,
	&armv8_event_attr_br_mis_pred.attr.attr,
	&armv8_event_attr_cpu_cycles.attr.attr,
	&armv8_event_attr_br_pred.attr.attr,
	&armv8_event_attr_mem_access.attr.attr,
	&armv8_event_attr_l1i_cache.attr.attr,
	&armv8_event_attr_l1d_cache_wb.attr.attr,
	&armv8_event_attr_l2d_cache.attr.attr,
	&armv8_event_attr_l2d_cache_refill.attr.attr,
	&armv8_event_attr_l2d_cache_wb.attr.attr,
	&armv8_event_attr_bus_access.attr.attr,
	&armv8_event_attr_memory_error.attr.attr,
	&armv8_event_attr_inst_spec.attr.attr,
	&armv8_event_attr_ttbr_write_retired.attr.attr,
	&armv8_event_attr_bus_cycles.attr.attr,
	&armv8_event_attr_l1d_cache_allocate.attr.attr,
	&armv8_event_attr_l2d_cache_allocate.attr.attr,
	&armv8_event_attr_br_retired.attr.attr,
	&armv8_event_attr_br_mis_pred_retired.attr.attr,
	&armv8_event_attr_stall_frontend.attr.attr,
	&armv8_event_attr_stall_backend.attr.attr,
	&armv8_event_attr_l1d_tlb.attr.attr,
	&armv8_event_attr_l1i_tlb.attr.attr,
	&armv8_event_attr_l2i_cache.attr.attr,
	&armv8_event_attr_l2i_cache_refill.attr.attr,
	&armv8_event_attr_l3d_cache_allocate.attr.attr,
	&armv8_event_attr_l3d_cache_refill.attr.attr,
	&armv8_event_attr_l3d_cache.attr.attr,
	&armv8_event_attr_l3d_cache_wb.attr.attr,
	&armv8_event_attr_l2d_tlb_refill.attr.attr,
	&armv8_event_attr_l2i_tlb_refill.attr.attr,
	&armv8_event_attr_l2d_tlb.attr.attr,
	&armv8_event_attr_l2i_tlb.attr.attr,
	&armv8_event_attr_remote_access.attr.attr,
	&armv8_event_attr_ll_cache.attr.attr,
	&armv8_event_attr_ll_cache_miss.attr.attr,
	&armv8_event_attr_dtlb_walk.attr.attr,
	&armv8_event_attr_itlb_walk.attr.attr,
	&armv8_event_attr_ll_cache_rd.attr.attr,
	&armv8_event_attr_ll_cache_miss_rd.attr.attr,
	&armv8_event_attr_remote_access_rd.attr.attr,
	&armv8_event_attr_sample_pop.attr.attr,
	&armv8_event_attr_sample_feed.attr.attr,
	&armv8_event_attr_sample_filtrate.attr.attr,
	&armv8_event_attr_sample_collision.attr.attr,
	NULL,
};
static umode_t
armv8pmu_event_attr_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
	    test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
		return attr->mode;

	pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
	if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
	    test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap))
		return attr->mode;

	return 0;
}

static struct attribute_group armv8_pmuv3_events_attr_group = {
	.name = "events",
	.attrs = armv8_pmuv3_event_attrs,
	.is_visible = armv8pmu_event_attr_is_visible,
};
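
/*
 * Editor's illustration (not part of the original source): the group above
 * is exported through the standard perf sysfs layout, so on a PMUv3 system
 * one would expect something like:
 *
 *   $ cat /sys/bus/event_source/devices/armv8_pmuv3/events/cpu_cycles
 *   event=0x011
 *
 * with unsupported events hidden by armv8pmu_event_attr_is_visible().
 */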
PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");

static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
{
	return event->attr.config1 & 0x1;
}
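
/*
 * Editor's illustration (not part of the original source): the "long"
 * format bit above lets userspace request a 64-bit counter, e.g.:
 *
 *   perf stat -e armv8_pmuv3/event=0x11,long=1/ -- sleep 1
 *
 * which sets attr.config1 bit 0 and makes armv8pmu_event_is_64bit() true.
 */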
static struct attribute *armv8_pmuv3_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_long.attr,
	NULL,
};

static struct attribute_group armv8_pmuv3_format_attr_group = {
	.name = "format",
	.attrs = armv8_pmuv3_format_attrs,
};
/*
 * Perf Events' indices
 */
#define	ARMV8_IDX_CYCLE_COUNTER	0
#define	ARMV8_IDX_COUNTER0	1
#define	ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

/*
 * We must chain two programmable counters for 64-bit events,
 * except when we have allocated the 64-bit cycle counter (for the CPU
 * cycles event). This must be called only when the event has
 * a counter allocated.
 */
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
	int idx = event->hw.idx;

	return !WARN_ON(idx < 0) &&
	       armv8pmu_event_is_64bit(event) &&
	       (idx != ARMV8_IDX_CYCLE_COUNTER);
}
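
/*
 * Editor's worked example (not part of the original source): a chained
 * event that was allocated perf idx 2 occupies perf indices 1 and 2,
 * i.e. hardware counters 0 and 1. The even hardware counter 0 counts
 * the event itself (low 32 bits) and the odd hardware counter 1 counts
 * the CHAIN event (high 32 bits); see armv8pmu_write_event_type() and
 * armv8pmu_read_hw_counter() below.
 */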
/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)

static inline u32 armv8pmu_pmcr_read(void)
{
	return read_sysreg(pmcr_el0);
}
static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMU_PMCR_MASK;
	isb();
	write_sysreg(val, pmcr_el0);
}
static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV8_IDX_CYCLE_COUNTER &&
		idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
static inline void armv8pmu_select_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(counter, pmselr_el0);
	isb();
}

static inline u32 armv8pmu_read_evcntr(int idx)
{
	armv8pmu_select_counter(idx);
	return read_sysreg(pmxevcntr_el0);
}

static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
	int idx = event->hw.idx;
	u64 val = 0;

	val = armv8pmu_read_evcntr(idx);
	if (armv8pmu_event_is_chained(event))
		val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
	return val;
}

static inline u64 armv8pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 value = 0;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		value = read_sysreg(pmccntr_el0);
	else
		value = armv8pmu_read_hw_counter(event);

	return value;
}
static inline void armv8pmu_write_evcntr(int idx, u32 value)
{
	armv8pmu_select_counter(idx);
	write_sysreg(value, pmxevcntr_el0);
}
static inline void armv8pmu_write_hw_counter(struct perf_event *event,
					     u64 value)
{
	int idx = event->hw.idx;

	if (armv8pmu_event_is_chained(event)) {
		armv8pmu_write_evcntr(idx, upper_32_bits(value));
		armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
	} else {
		armv8pmu_write_evcntr(idx, value);
	}
}

static inline void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
		/*
		 * The cycles counter is really a 64-bit counter.
		 * When treating it as a 32-bit counter, we only count
		 * the lower 32 bits, and set the upper 32 bits so that
		 * we get an interrupt upon 32-bit overflow.
		 */
		if (!armv8pmu_event_is_64bit(event))
			value |= 0xffffffff00000000ULL;
		write_sysreg(value, pmccntr_el0);
	} else
		armv8pmu_write_hw_counter(event, value);
}
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	armv8pmu_select_counter(idx);
	val &= ARMV8_PMU_EVTYPE_MASK;
	write_sysreg(val, pmxevtyper_el0);
}
static inline void armv8pmu_write_event_type(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * For chained events, the low counter is programmed to count
	 * the event of interest and the high counter is programmed
	 * with the CHAIN event code with filters set to count at all ELs.
	 */
	if (armv8pmu_event_is_chained(event)) {
		u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
				ARMV8_PMU_INCLUDE_EL2;

		armv8pmu_write_evtype(idx - 1, hwc->config_base);
		armv8pmu_write_evtype(idx, chain_evt);
	} else {
		armv8pmu_write_evtype(idx, hwc->config_base);
	}
}
static inline int armv8pmu_enable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmcntenset_el0);
	return idx;
}

static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
	int idx = event->hw.idx;

	armv8pmu_enable_counter(idx);
	if (armv8pmu_event_is_chained(event))
		armv8pmu_enable_counter(idx - 1);
	isb();
}

static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmcntenclr_el0);
	return idx;
}

static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (armv8pmu_event_is_chained(event))
		armv8pmu_disable_counter(idx - 1);
	armv8pmu_disable_counter(idx);
}

static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmintenset_el1);
	return idx;
}

static inline int armv8pmu_enable_event_irq(struct perf_event *event)
{
	return armv8pmu_enable_intens(event->hw.idx);
}

static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmintenclr_el1);
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	write_sysreg(BIT(counter), pmovsclr_el0);
	isb();

	return idx;
}

static inline int armv8pmu_disable_event_irq(struct perf_event *event)
{
	return armv8pmu_disable_intens(event->hw.idx);
}

static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	value = read_sysreg(pmovsclr_el0);

	/* Write to clear flags */
	value &= ARMV8_PMU_OVSR_MASK;
	write_sysreg(value, pmovsclr_el0);

	return value;
}
static void armv8pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Set event (if destined for PMNx counters).
	 */
	armv8pmu_write_event_type(event);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_event_irq(event);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_event_counter(event);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_event_irq(event);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	/*
	 * Stop the PMU while processing the counter overflows
	 * to prevent skews in group events.
	 */
	armv8pmu_stop(cpu_pmu);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}
	armv8pmu_start(cpu_pmu);

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
				   struct arm_pmu *cpu_pmu)
{
	int idx;

	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}
	return -EAGAIN;
}

static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
				  struct arm_pmu *cpu_pmu)
{
	int idx;

	/*
	 * Chaining requires two consecutive event counters, where
	 * the lower idx must be even.
	 */
	for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
		if (!test_and_set_bit(idx, cpuc->used_mask)) {
			/* Check if the preceding even counter is available */
			if (!test_and_set_bit(idx - 1, cpuc->used_mask))
				return idx;
			/* Release the odd counter */
			clear_bit(idx, cpuc->used_mask);
		}
	}
	return -EAGAIN;
}

static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;

	/* Always prefer to place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * Otherwise use the event counters
	 */
	if (armv8pmu_event_is_64bit(event))
		return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
	else
		return armv8pmu_get_single_idx(cpuc, cpu_pmu);
}
static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				     struct perf_event *event)
{
	int idx = event->hw.idx;

	clear_bit(idx, cpuc->used_mask);
	if (armv8pmu_event_is_chained(event))
		clear_bit(idx - 1, cpuc->used_mask);
}
/*
 * Add an event filter to a given event.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;

	/*
	 * If we're running in hyp mode, then we *are* the hypervisor.
	 * Therefore we ignore exclude_hv in this configuration, since
	 * there's no hypervisor to sample anyway. This is consistent
	 * with other architectures (x86 and Power).
	 */
	if (is_kernel_in_hyp_mode()) {
		if (!attr->exclude_kernel)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	} else {
		if (attr->exclude_kernel)
			config_base |= ARMV8_PMU_EXCLUDE_EL1;
		if (!attr->exclude_hv)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	}
	if (attr->exclude_user)
		config_base |= ARMV8_PMU_EXCLUDE_EL0;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
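
/*
 * Editor's illustration (not part of the original source): for
 * "perf stat -e cycles:u", the core sets attr.exclude_kernel and
 * attr.exclude_hv, so (outside of VHE) the filter above reduces to
 * ARMV8_PMU_EXCLUDE_EL1 and the counter only runs at EL0.
 */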
static int armv8pmu_filter_match(struct perf_event *event)
{
	unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
	return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
}
static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv8pmu_disable_counter(idx);
		armv8pmu_disable_intens(idx);
	}

	/*
	 * Initialize & Reset PMNC. Request overflow interrupt for
	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
	 */
	armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
			    ARMV8_PMU_PMCR_LC);
}
static int __armv8_pmuv3_map_event(struct perf_event *event,
				   const unsigned (*extra_event_map)
						  [PERF_COUNT_HW_MAX],
				   const unsigned (*extra_cache_map)
						  [PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX])
{
	int hw_event_id;
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
				       &armv8_pmuv3_perf_cache_map,
				       ARMV8_PMU_EVTYPE_EVENT);

	if (armv8pmu_event_is_64bit(event))
		event->hw.flags |= ARMPMU_EVT_64BIT;

	/* Only expose micro/arch events supported by this PMU */
	if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
	    && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
		return hw_event_id;
	}

	return armpmu_map_event(event, extra_event_map, extra_cache_map,
				ARMV8_PMU_EVTYPE_EVENT);
}
static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, NULL);
}

static int armv8_a53_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
}

static int armv8_a57_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
}

static int armv8_a73_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
}

static int armv8_thunder_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_thunder_perf_cache_map);
}

static int armv8_vulcan_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_vulcan_perf_cache_map);
}
struct armv8pmu_probe_info {
	struct arm_pmu *pmu;
	bool present;
};
static void __armv8pmu_probe_pmu(void *info)
{
	struct armv8pmu_probe_info *probe = info;
	struct arm_pmu *cpu_pmu = probe->pmu;
	u64 dfr0;
	u64 pmceid_raw[2];
	u32 pmceid[2];
	int pmuver;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
			ID_AA64DFR0_PMUVER_SHIFT);
	if (pmuver == 0xf || pmuver == 0)
		return;

	probe->present = true;

	/* Read the number of CNTx counters supported from PMNC */
	cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
		& ARMV8_PMU_PMCR_N_MASK;

	/* Add the CPU cycles counter */
	cpu_pmu->num_events += 1;

	pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
	pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);

	bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);

	pmceid[0] = pmceid_raw[0] >> 32;
	pmceid[1] = pmceid_raw[1] >> 32;

	bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
}
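
/*
 * Editor's note (not part of the original source): the architecture
 * advertises the common events 0x0000-0x003F in the low 32-bit halves of
 * PMCEID0_EL0/PMCEID1_EL0 and the extended range 0x4000-0x403F in the
 * high halves, which is why the probe above builds pmceid_bitmap from
 * the low 32 bits and pmceid_ext_bitmap from the high 32 bits.
 */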
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
	struct armv8pmu_probe_info probe = {
		.pmu = cpu_pmu,
		.present = false,
	};
	int ret;

	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
				    __armv8pmu_probe_pmu,
				    &probe, 1);
	if (ret)
		return ret;

	return probe.present ? 0 : -ENODEV;
}

static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8pmu_probe_pmu(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->handle_irq = armv8pmu_handle_irq;
	cpu_pmu->enable = armv8pmu_enable_event;
	cpu_pmu->disable = armv8pmu_disable_event;
	cpu_pmu->read_counter = armv8pmu_read_counter;
	cpu_pmu->write_counter = armv8pmu_write_counter;
	cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
	cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx;
	cpu_pmu->start = armv8pmu_start;
	cpu_pmu->stop = armv8pmu_stop;
	cpu_pmu->reset = armv8pmu_reset;
	cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
	cpu_pmu->filter_match = armv8pmu_filter_match;

	return 0;
}
static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_pmuv3";
	cpu_pmu->map_event = armv8_pmuv3_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a35";
	cpu_pmu->map_event = armv8_a53_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a53";
	cpu_pmu->map_event = armv8_a53_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a57";
	cpu_pmu->map_event = armv8_a57_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a72";
	cpu_pmu->map_event = armv8_a57_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a73";
	cpu_pmu->map_event = armv8_a73_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cavium_thunder";
	cpu_pmu->map_event = armv8_thunder_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_brcm_vulcan";
	cpu_pmu->map_event = armv8_vulcan_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}
static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3",	.data = armv8_pmuv3_init},
	{.compatible = "arm,cortex-a35-pmu",	.data = armv8_a35_pmu_init},
	{.compatible = "arm,cortex-a53-pmu",	.data = armv8_a53_pmu_init},
	{.compatible = "arm,cortex-a57-pmu",	.data = armv8_a57_pmu_init},
	{.compatible = "arm,cortex-a72-pmu",	.data = armv8_a72_pmu_init},
	{.compatible = "arm,cortex-a73-pmu",	.data = armv8_a73_pmu_init},
	{.compatible = "cavium,thunder-pmu",	.data = armv8_thunder_pmu_init},
	{.compatible = "brcm,vulcan-pmu",	.data = armv8_vulcan_pmu_init},
	{},
};

static int armv8_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
	.driver = {
		.name = ARMV8_PMU_PDEV_NAME,
		.of_match_table = armv8_pmu_of_device_ids,
		.suppress_bind_attrs = true,
	},
	.probe = armv8_pmu_device_probe,
};
static int __init armv8_pmu_driver_init(void)
{
	if (acpi_disabled)
		return platform_driver_register(&armv8_pmu_driver);
	else
		return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)
void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{
	u32 freq;
	u32 shift;

	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always computed with the sched_clock.
	 */
	freq = arch_timer_get_rate();
	userpg->cap_user_time = 1;

	clocks_calc_mult_shift(&userpg->time_mult, &shift, freq,
			NSEC_PER_SEC, 0);
	/*
	 * time_shift is not expected to be greater than 31 due to
	 * the original published conversion algorithm shifting a
	 * 32-bit value (now specifies a 64-bit value) - refer
	 * perf_event_mmap_page documentation in perf_event.h.
	 */
	if (shift == 32) {
		shift = 31;
		userpg->time_mult >>= 1;
	}
	userpg->time_shift = (u16)shift;
	userpg->time_offset = -now;
}
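
/*
 * Editor's sketch (not part of the original source): with cap_user_time
 * set, userspace converts a raw cycle delta to nanoseconds as documented
 * for struct perf_event_mmap_page:
 *
 *   quot  = cyc >> time_shift;
 *   rem   = cyc & (((u64)1 << time_shift) - 1);
 *   delta = time_offset + quot * time_mult +
 *           ((rem * time_mult) >> time_shift);
 */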