 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <linux/acpi.h>
#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types (some are defined in asm/perf_event.h).
 */

/* At least one of the following is required. */
#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B

/* Common architectural events. */
#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x06
#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x07
#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x09
#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x0A
#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x0B
#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x0C
#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x0D
#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x0E
#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x0F
#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x1C
#define ARMV8_PMUV3_PERFCTR_CHAIN 0x1E
#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x21

/* Common microarchitectural events. */
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x01
#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x02
#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x05
#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x13
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x14
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x15
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x16
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x17
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x18
#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x19
#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x1A
#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x1D
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x1F
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x20
#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x22
#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x23
#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x24
#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x25
#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x26
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x27
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x28
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x29
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x2A
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x2B
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x2C
#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x2D
#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x2E
#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x2F
#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x30

/* ARMv8 recommended implementation defined event types */
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x40
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x41
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x42
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x43
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x44
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x45
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x46
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x47
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x48

#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x4C
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x4D
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x4E
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x4F
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x50
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x51
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x52
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x53

#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x56
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x57
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x58

#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x5C
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x5D
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x5E
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x5F

#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x60
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x61
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x62
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x63
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x64
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x65

#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x66
#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x67
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x68
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x69
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x6A

#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x6C
#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x6D
#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x6E
#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x6F
#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x70
#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x71
#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x72
#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x73
#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x74
#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x75
#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x76
#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x77
#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x78
#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x79
#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x7A

#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x7C
#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x7D
#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x7E

#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x81
#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x82
#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x83
#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x84

#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x86
#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x87
#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x88

#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x8A
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x8B
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x8C
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x8D
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x8E
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x8F
#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x90
#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x91

#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0xA0
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0xA1
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0xA2
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0xA3

#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0xA6
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0xA7
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0xA8

/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2

/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST 0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS 0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS 0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED

/* PMUv3 HW events mapping. */

/*
 * ARMv8 architecturally defined events; not all of these may
 * be supported on any given implementation. Undefined events will
 * be disabled at run-time.
 */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};

static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};

static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,

	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};

static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						 [PERF_COUNT_HW_CACHE_OP_MAX]
						 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static ssize_t
armv8pmu_events_sysfs_show(struct device *dev,
			   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
}

#define ARMV8_EVENT_ATTR_RESOLVE(m) #m
#define ARMV8_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
		       config, armv8pmu_events_sysfs_show)

ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR);
ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL);
ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE);
ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL);
ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED);
ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED);
ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED);
ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN);
ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN);
ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED);
ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED);
ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED);
ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED);
ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED);
ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES);
ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED);
ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS);
ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE);
ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE);
ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB);
ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR);
ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC);
ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED);
ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES);
/* Don't expose the chain event in /sys, since it's useless in isolation */
ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED);
ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND);
ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND);
ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB);
ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB);
ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE);
ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE);
ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL);
ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);

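/*
 * Note (editorial): these named attributes surface under
 * /sys/bus/event_source/devices/<pmu>/events/, so an event can be
 * requested by name, e.g. (illustrative usage)
 *   perf stat -e armv8_pmuv3/inst_retired/ -- <workload>
 * provided the PMU advertises the event; visibility is gated by
 * armv8pmu_event_attr_is_visible() below.
 */
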
static struct attribute *armv8_pmuv3_event_attrs[] = {
	&armv8_event_attr_sw_incr.attr.attr,
	&armv8_event_attr_l1i_cache_refill.attr.attr,
	&armv8_event_attr_l1i_tlb_refill.attr.attr,
	&armv8_event_attr_l1d_cache_refill.attr.attr,
	&armv8_event_attr_l1d_cache.attr.attr,
	&armv8_event_attr_l1d_tlb_refill.attr.attr,
	&armv8_event_attr_ld_retired.attr.attr,
	&armv8_event_attr_st_retired.attr.attr,
	&armv8_event_attr_inst_retired.attr.attr,
	&armv8_event_attr_exc_taken.attr.attr,
	&armv8_event_attr_exc_return.attr.attr,
	&armv8_event_attr_cid_write_retired.attr.attr,
	&armv8_event_attr_pc_write_retired.attr.attr,
	&armv8_event_attr_br_immed_retired.attr.attr,
	&armv8_event_attr_br_return_retired.attr.attr,
	&armv8_event_attr_unaligned_ldst_retired.attr.attr,
	&armv8_event_attr_br_mis_pred.attr.attr,
	&armv8_event_attr_cpu_cycles.attr.attr,
	&armv8_event_attr_br_pred.attr.attr,
	&armv8_event_attr_mem_access.attr.attr,
	&armv8_event_attr_l1i_cache.attr.attr,
	&armv8_event_attr_l1d_cache_wb.attr.attr,
	&armv8_event_attr_l2d_cache.attr.attr,
	&armv8_event_attr_l2d_cache_refill.attr.attr,
	&armv8_event_attr_l2d_cache_wb.attr.attr,
	&armv8_event_attr_bus_access.attr.attr,
	&armv8_event_attr_memory_error.attr.attr,
	&armv8_event_attr_inst_spec.attr.attr,
	&armv8_event_attr_ttbr_write_retired.attr.attr,
	&armv8_event_attr_bus_cycles.attr.attr,
	&armv8_event_attr_l1d_cache_allocate.attr.attr,
	&armv8_event_attr_l2d_cache_allocate.attr.attr,
	&armv8_event_attr_br_retired.attr.attr,
	&armv8_event_attr_br_mis_pred_retired.attr.attr,
	&armv8_event_attr_stall_frontend.attr.attr,
	&armv8_event_attr_stall_backend.attr.attr,
	&armv8_event_attr_l1d_tlb.attr.attr,
	&armv8_event_attr_l1i_tlb.attr.attr,
	&armv8_event_attr_l2i_cache.attr.attr,
	&armv8_event_attr_l2i_cache_refill.attr.attr,
	&armv8_event_attr_l3d_cache_allocate.attr.attr,
	&armv8_event_attr_l3d_cache_refill.attr.attr,
	&armv8_event_attr_l3d_cache.attr.attr,
	&armv8_event_attr_l3d_cache_wb.attr.attr,
	&armv8_event_attr_l2d_tlb_refill.attr.attr,
	&armv8_event_attr_l2i_tlb_refill.attr.attr,
	&armv8_event_attr_l2d_tlb.attr.attr,
	&armv8_event_attr_l2i_tlb.attr.attr,
	NULL,
};

static umode_t
armv8pmu_event_attr_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
		return attr->mode;

	return 0;
}

static struct attribute_group armv8_pmuv3_events_attr_group = {
	.name = "events",
	.attrs = armv8_pmuv3_event_attrs,
	.is_visible = armv8pmu_event_attr_is_visible,
};

PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");

static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
{
	return event->attr.config1 & 0x1;
}

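/*
 * Illustrative usage (editorial note): the "long" format attribute
 * above requests a 64-bit wide counter from userspace, e.g.
 *   perf stat -e armv8_pmuv3/event=0x11,long/ -- <workload>
 * sets attr.config1 bit 0, making armv8pmu_event_is_64bit() true for
 * the event (0x11 is ARMV8_PMUV3_PERFCTR_CPU_CYCLES).
 */
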
static struct attribute *armv8_pmuv3_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_long.attr,
	NULL,
};

static struct attribute_group armv8_pmuv3_format_attr_group = {
	.name = "format",
	.attrs = armv8_pmuv3_format_attrs,
};

/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0 1
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

/*
 * We must chain two programmable counters for 64-bit events, except
 * when we have allocated the 64-bit cycle counter (for the CPU cycles
 * event). This must be called only when the event has a counter
 * allocated.
 */
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
	int idx = event->hw.idx;

	return !WARN_ON(idx < 0) &&
	       armv8pmu_event_is_64bit(event) &&
	       (idx != ARMV8_IDX_CYCLE_COUNTER);
}

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x) \
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)

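/*
 * Note (editorial): perf index 0 is the dedicated cycle counter, and
 * perf indices 1..N map onto hardware event counters 0..N-1; for
 * example, ARMV8_IDX_TO_COUNTER(ARMV8_IDX_COUNTER0) selects PMEVCNTR0.
 */
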
static inline u32 armv8pmu_pmcr_read(void)
{
	return read_sysreg(pmcr_el0);
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMU_PMCR_MASK;
	isb();
	write_sysreg(val, pmcr_el0);
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV8_IDX_CYCLE_COUNTER &&
		idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline void armv8pmu_select_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(counter, pmselr_el0);
	isb();
}

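/*
 * Note (editorial): the PMXEV* accessors below are indirect. A counter
 * is first selected via PMSELR_EL0 (with an isb() to synchronise the
 * selection), after which PMXEVCNTR_EL0/PMXEVTYPER_EL0 accesses target
 * the selected counter.
 */
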
static inline u32 armv8pmu_read_evcntr(int idx)
{
	armv8pmu_select_counter(idx);
	return read_sysreg(pmxevcntr_el0);
}

static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
	int idx = event->hw.idx;
	u64 val = 0;

	val = armv8pmu_read_evcntr(idx);
	if (armv8pmu_event_is_chained(event))
		val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
	return val;
}

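/*
 * Note (editorial): for a chained pair, the even-numbered hardware
 * counter counts the event itself (low 32 bits) while the adjacent
 * odd-numbered counter counts CHAIN events, i.e. overflows of its
 * predecessor (high 32 bits); hence the (val << 32) | low composition
 * above.
 */
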
static inline u64 armv8pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 value = 0;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		value = read_sysreg(pmccntr_el0);
	else
		value = armv8pmu_read_hw_counter(event);

	return value;
}

static inline void armv8pmu_write_evcntr(int idx, u32 value)
{
	armv8pmu_select_counter(idx);
	write_sysreg(value, pmxevcntr_el0);
}

static inline void armv8pmu_write_hw_counter(struct perf_event *event,
					     u64 value)
{
	int idx = event->hw.idx;

	if (armv8pmu_event_is_chained(event)) {
		armv8pmu_write_evcntr(idx, upper_32_bits(value));
		armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
	} else {
		armv8pmu_write_evcntr(idx, value);
	}
}

static inline void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
		/*
		 * The cycles counter is really a 64-bit counter.
		 * When treating it as a 32-bit counter, we only count
		 * the lower 32 bits, and set the upper 32 bits so that
		 * we get an interrupt upon 32-bit overflow.
		 */
		if (!armv8pmu_event_is_64bit(event))
			value |= 0xffffffff00000000ULL;
		write_sysreg(value, pmccntr_el0);
	} else
		armv8pmu_write_hw_counter(event, value);
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	armv8pmu_select_counter(idx);
	val &= ARMV8_PMU_EVTYPE_MASK;
	write_sysreg(val, pmxevtyper_el0);
}

static inline void armv8pmu_write_event_type(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * For chained events, the low counter is programmed to count
	 * the event of interest and the high counter is programmed
	 * with the CHAIN event code with filters set to count at all ELs.
	 */
	if (armv8pmu_event_is_chained(event)) {
		u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
				ARMV8_PMU_INCLUDE_EL2;

		armv8pmu_write_evtype(idx - 1, hwc->config_base);
		armv8pmu_write_evtype(idx, chain_evt);
	} else {
		armv8pmu_write_evtype(idx, hwc->config_base);
	}
}

static inline int armv8pmu_enable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmcntenset_el0);
	return idx;
}

static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
	int idx = event->hw.idx;

	armv8pmu_enable_counter(idx);
	if (armv8pmu_event_is_chained(event))
		armv8pmu_enable_counter(idx - 1);
	isb();
}

static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmcntenclr_el0);
	return idx;
}

static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (armv8pmu_event_is_chained(event))
		armv8pmu_disable_counter(idx - 1);
	armv8pmu_disable_counter(idx);
}

static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmintenset_el1);
	return idx;
}

static inline int armv8pmu_enable_event_irq(struct perf_event *event)
{
	return armv8pmu_enable_intens(event->hw.idx);
}

static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmintenclr_el1);
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	write_sysreg(BIT(counter), pmovsclr_el0);
	isb();

	return idx;
}

static inline int armv8pmu_disable_event_irq(struct perf_event *event)
{
	return armv8pmu_disable_intens(event->hw.idx);
}

static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	value = read_sysreg(pmovsclr_el0);

	/* Write to clear flags */
	value &= ARMV8_PMU_OVSR_MASK;
	write_sysreg(value, pmovsclr_el0);

	return value;
}

static void armv8pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Set event (if destined for PMNx counters).
	 */
	armv8pmu_write_event_type(event);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_event_irq(event);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_event_counter(event);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_event_irq(event);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	/*
	 * Stop the PMU while processing the counter overflows
	 * to prevent skews in group events.
	 */
	armv8pmu_stop(cpu_pmu);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}
	armv8pmu_start(cpu_pmu);

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
				   struct arm_pmu *cpu_pmu)
{
	int idx;

	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}
	return -EAGAIN;
}

static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
				  struct arm_pmu *cpu_pmu)
{
	int idx;

	/*
	 * Chaining requires two consecutive event counters, where
	 * the lower idx must be even.
	 */
	for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
		if (!test_and_set_bit(idx, cpuc->used_mask)) {
			/* Check if the preceding even counter is available */
			if (!test_and_set_bit(idx - 1, cpuc->used_mask))
				return idx;
			/* Release the odd counter */
			clear_bit(idx, cpuc->used_mask);
		}
	}
	return -EAGAIN;
}

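/*
 * Example (editorial): with num_events = 7 (the cycle counter plus six
 * event counters), the scan above tries perf indices 2, 4 and 6,
 * claiming a pair (idx, idx - 1) only when both halves are free; that
 * corresponds to hardware counter pairs (1,0), (3,2) and (5,4).
 */
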
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;

	/* Always prefer to place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * Otherwise use the event counters
	 */
	if (armv8pmu_event_is_64bit(event))
		return armv8pmu_get_chain_idx(cpuc, cpu_pmu);

	return armv8pmu_get_single_idx(cpuc, cpu_pmu);
}

static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				     struct perf_event *event)
{
	int idx = event->hw.idx;

	clear_bit(idx, cpuc->used_mask);
	if (armv8pmu_event_is_chained(event))
		clear_bit(idx - 1, cpuc->used_mask);
}

/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;

	/*
	 * If we're running in hyp mode, then we *are* the hypervisor.
	 * Therefore we ignore exclude_hv in this configuration, since
	 * there's no hypervisor to sample anyway. This is consistent
	 * with other architectures (x86 and Power).
	 */
	if (is_kernel_in_hyp_mode()) {
		if (!attr->exclude_kernel)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	} else {
		if (attr->exclude_kernel)
			config_base |= ARMV8_PMU_EXCLUDE_EL1;
		if (!attr->exclude_hv)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	}
	if (attr->exclude_user)
		config_base |= ARMV8_PMU_EXCLUDE_EL0;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}

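/*
 * Illustrative example (editorial): "perf stat -e cycles:u" sets
 * attr.exclude_kernel and attr.exclude_hv, so on a non-VHE kernel the
 * logic above yields config_base = ARMV8_PMU_EXCLUDE_EL1, i.e. the
 * event counts at EL0 only.
 */
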
static int armv8pmu_filter_match(struct perf_event *event)
{
	unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
	return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
}

static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv8pmu_disable_counter(idx);
		armv8pmu_disable_intens(idx);
	}

	/*
	 * Initialize & Reset PMNC. Request overflow interrupt for
	 * the 64-bit cycle counter but cheat in armv8pmu_write_counter().
	 */
	armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
			    ARMV8_PMU_PMCR_LC);
}

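/*
 * Note (editorial): the PMCR bits used above are P (reset the event
 * counters), C (reset the cycle counter) and LC (make the cycle
 * counter overflow at 64 bits; the 32-bit view is emulated in
 * armv8pmu_write_counter()).
 */
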
static int __armv8_pmuv3_map_event(struct perf_event *event,
				   const unsigned (*extra_event_map)
						  [PERF_COUNT_HW_MAX],
				   const unsigned (*extra_cache_map)
						  [PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX])
{
	int hw_event_id;
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
				       &armv8_pmuv3_perf_cache_map,
				       ARMV8_PMU_EVTYPE_EVENT);

	if (armv8pmu_event_is_64bit(event))
		event->hw.flags |= ARMPMU_EVT_64BIT;

	/* Only expose micro/arch events supported by this PMU */
	if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
	    && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
		return hw_event_id;
	}

	return armpmu_map_event(event, extra_event_map, extra_cache_map,
				ARMV8_PMU_EVTYPE_EVENT);
}

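/*
 * Mapping strategy (editorial note): an architected event that this
 * PMU advertises in PMCEID0/1 is used directly; anything else falls
 * back to the CPU-specific extra_event_map/extra_cache_map, when one
 * is supplied by the wrappers below.
 */
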
static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, NULL);
}

static int armv8_a53_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
}

static int armv8_a57_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
}

static int armv8_a73_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
}

static int armv8_thunder_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_thunder_perf_cache_map);
}

static int armv8_vulcan_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_vulcan_perf_cache_map);
}

struct armv8pmu_probe_info {
	struct arm_pmu *pmu;
	bool present;
};

static void __armv8pmu_probe_pmu(void *info)
{
	struct armv8pmu_probe_info *probe = info;
	struct arm_pmu *cpu_pmu = probe->pmu;
	u64 dfr0;
	u32 pmceid[2];
	int pmuver;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
			ID_AA64DFR0_PMUVER_SHIFT);
	if (pmuver == 0xf || pmuver == 0)
		return;

	probe->present = true;

	/* Read the number of supported CNTx counters from PMNC */
	cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
		& ARMV8_PMU_PMCR_N_MASK;

	/* Add the CPU cycles counter */
	cpu_pmu->num_events += 1;

	pmceid[0] = read_sysreg(pmceid0_el0);
	pmceid[1] = read_sysreg(pmceid1_el0);

	bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
}

static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
	struct armv8pmu_probe_info probe = {
		.pmu = cpu_pmu,
		.present = false,
	};
	int ret;

	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
				    __armv8pmu_probe_pmu,
				    &probe, 1);
	if (ret)
		return ret;

	return probe.present ? 0 : -ENODEV;
}

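/*
 * Note (editorial): the probe runs via smp_call_function_any() because
 * PMCR_EL0 and PMCEID{0,1}_EL0 must be read on a CPU that actually
 * implements this PMU instance.
 */
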
static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8pmu_probe_pmu(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->handle_irq		= armv8pmu_handle_irq;
	cpu_pmu->enable			= armv8pmu_enable_event;
	cpu_pmu->disable		= armv8pmu_disable_event;
	cpu_pmu->read_counter		= armv8pmu_read_counter;
	cpu_pmu->write_counter		= armv8pmu_write_counter;
	cpu_pmu->get_event_idx		= armv8pmu_get_event_idx;
	cpu_pmu->clear_event_idx	= armv8pmu_clear_event_idx;
	cpu_pmu->start			= armv8pmu_start;
	cpu_pmu->stop			= armv8pmu_stop;
	cpu_pmu->reset			= armv8pmu_reset;
	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
	cpu_pmu->filter_match		= armv8pmu_filter_match;

	return 0;
}

static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name			= "armv8_pmuv3";
	cpu_pmu->map_event		= armv8_pmuv3_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name			= "armv8_cortex_a35";
	cpu_pmu->map_event		= armv8_a53_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name			= "armv8_cortex_a53";
	cpu_pmu->map_event		= armv8_a53_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name			= "armv8_cortex_a57";
	cpu_pmu->map_event		= armv8_a57_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name			= "armv8_cortex_a72";
	cpu_pmu->map_event		= armv8_a57_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name			= "armv8_cortex_a73";
	cpu_pmu->map_event		= armv8_a73_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name			= "armv8_cavium_thunder";
	cpu_pmu->map_event		= armv8_thunder_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name			= "armv8_brcm_vulcan";
	cpu_pmu->map_event		= armv8_vulcan_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3",	.data = armv8_pmuv3_init},
	{.compatible = "arm,cortex-a35-pmu",	.data = armv8_a35_pmu_init},
	{.compatible = "arm,cortex-a53-pmu",	.data = armv8_a53_pmu_init},
	{.compatible = "arm,cortex-a57-pmu",	.data = armv8_a57_pmu_init},
	{.compatible = "arm,cortex-a72-pmu",	.data = armv8_a72_pmu_init},
	{.compatible = "arm,cortex-a73-pmu",	.data = armv8_a73_pmu_init},
	{.compatible = "cavium,thunder-pmu",	.data = armv8_thunder_pmu_init},
	{.compatible = "brcm,vulcan-pmu",	.data = armv8_vulcan_pmu_init},
	{},
};

static int armv8_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
	.driver		= {
		.name	= ARMV8_PMU_PDEV_NAME,
		.of_match_table = armv8_pmu_of_device_ids,
	},
	.probe		= armv8_pmu_device_probe,
};

static int __init armv8_pmu_driver_init(void)
{
	if (acpi_disabled)
		return platform_driver_register(&armv8_pmu_driver);
	else
		return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)

void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{
	u32 freq;
	u32 shift;

	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always computed with the sched_clock.
	 */
	freq = arch_timer_get_rate();
	userpg->cap_user_time = 1;

	clocks_calc_mult_shift(&userpg->time_mult, &shift, freq,
			NSEC_PER_SEC, 0);
	/*
	 * time_shift is not expected to be greater than 31 due to
	 * the original published conversion algorithm shifting a
	 * 32-bit value (now specifies a 64-bit value) - refer
	 * perf_event_mmap_page documentation in perf_event.h.
	 */
	if (shift == 32) {
		shift = 31;
		userpg->time_mult >>= 1;
	}
	userpg->time_shift = (u16)shift;
	userpg->time_offset = -now;
}

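/*
 * Note (editorial): with cap_user_time set, userspace can convert raw
 * cycle deltas to nanoseconds roughly as documented for
 * perf_event_mmap_page in perf_event.h:
 *   ns_delta = (cyc_delta * time_mult) >> time_shift
 * with time_offset applied when an absolute time value is needed.
 */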