1 // SPDX-License-Identifier: GPL-2.0-only
5 * Used to coordinate shared registers between HT threads or
6 * among events on a single PMU.
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 #include <linux/stddef.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/nmi.h>
18 #include <asm/cpufeature.h>
19 #include <asm/hardirq.h>
20 #include <asm/intel-family.h>
22 #include <asm/cpu_device_id.h>
24 #include "../perf_event.h"
27 * Intel PerfMon, used on Core and later.
29 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
31 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
32 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
33 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
34 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
35 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
36 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
37 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
38 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
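/*
 * Decoding sketch (illustrative note, not used by the code): the low byte
 * of each value is the event select and the second byte is the umask, so
 * 0x412e is event 0x2e/umask 0x41 (LONGEST_LAT_CACHE.MISS) and 0x4f2e is
 * the same event with umask 0x4f (LONGEST_LAT_CACHE.REFERENCE).  0x0300
 * is not a real hardware encoding; it is a pseudo-encoding the scheduler
 * resolves to fixed counter 2 (reference cycles).
 */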
41 static struct event_constraint intel_core_event_constraints[] __read_mostly =
43 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
44 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
45 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
46 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
47 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
48 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
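/*
 * Reading these tables (descriptive note): INTEL_EVENT_CONSTRAINT(event,
 * cntmask) limits an event to the general-purpose counters set in the
 * bitmask, e.g. the 0x2 entries above pin FP_ASSIST/MUL/DIV to counter 1
 * and the 0x1 entries pin their events to counter 0.  The
 * FIXED_EVENT_CONSTRAINT() entries below tie a pseudo event code to a
 * fixed counter instead.
 */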
52 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
54 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
55 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
56 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
57 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
58 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
59 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
60 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
61 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
62 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
63 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
64 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
65 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
66 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
70 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
72 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
73 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
74 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
75 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
76 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
77 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
78 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
79 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
80 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
81 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
82 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
86 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
88 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
89 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
90 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
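/*
 * Illustrative usage sketch (the payload value is an example, not taken
 * from this file): an OFFCORE_RESPONSE event is requested as
 *
 *   perf stat -e cpu/event=0xb7,umask=0x01,offcore_rsp=0x8fff/ ...
 *
 * and the offcore_rsp payload is written to MSR_OFFCORE_RSP_0 through
 * this extra_reg; the 0xffff above is the mask of payload bits accepted
 * on this generation.
 */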
94 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
96 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
97 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
98 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
99 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
100 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
101 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
102 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
106 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
108 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
109 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
110 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
111 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
112 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
113 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
114 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
115 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
116 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
117 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
118 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
119 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
122 * When HT is off these events can only run on the bottom 4 counters
123 * When HT is on, they are impacted by the HT bug and require EXCL access
125 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
126 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
127 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
128 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
133 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
135 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
136 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
137 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
138 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
139 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
140 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
141 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
142 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
143 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
144 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
145 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
146 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
147 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
150 * When HT is off these events can only run on the bottom 4 counters
151 * When HT is on, they are impacted by the HT bug and require EXCL access
153 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
154 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
155 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
156 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
161 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
163 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
164 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
165 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
166 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
170 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
175 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
177 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
178 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
179 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
183 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
185 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
186 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
187 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
191 static struct event_constraint intel_skl_event_constraints[] = {
192 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
193 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
194 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
195 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
198 * when HT is off, these can only run on the bottom 4 counters
200 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
201 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
202 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
203 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
204 INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
209 static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
210 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
211 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
215 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
216 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
217 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
218 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
219 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
223 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
224 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
225 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
226 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
227 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
231 static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
232 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
233 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
234 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
236 * Note: the low 8 bits of the eventsel code do not form a contiguous
237 * field; some of the bits #GP when set. These are masked out.
239 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
243 static struct event_constraint intel_icl_event_constraints[] = {
244 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
245 INTEL_UEVENT_CONSTRAINT(0x1c0, 0), /* INST_RETIRED.PREC_DIST */
246 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
247 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
248 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
249 INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
250 INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
251 INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
252 INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
253 INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
254 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
255 INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
256 INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
257 INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
258 INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
259 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
260 INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
264 static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
265 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff9fffull, RSP_0),
266 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff9fffull, RSP_1),
267 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
268 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
272 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
273 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
274 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
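/*
 * These aliases back "perf mem"-style sampling.  A hedged usage sketch
 * (the threshold value is an arbitrary example):
 *
 *   perf record -e cpu/mem-loads,ldlat=30/P ...
 *
 * ldlat is programmed into MSR_PEBS_LD_LAT_THRESHOLD so that PEBS only
 * samples loads whose latency reaches the threshold.
 */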
276 static struct attribute *nhm_mem_events_attrs[] = {
277 EVENT_PTR(mem_ld_nhm),
282 * topdown events for Intel Core CPUs.
284 * The events are all counted in slots, where a slot is an issue
285 * opportunity in the 4-wide pipeline. Some events already report in
286 * slots; for cycle-based events we multiply by the pipeline width (4).
288 * With Hyper Threading on, topdown metrics are either summed or averaged
289 * between the threads of a core: (count_t0 + count_t1).
291 * For the average case the metric is always scaled to pipeline width,
292 * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
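 *
 * For reference, a sketch of how the tool side combines these attributes
 * into the standard level-1 Top-Down metrics (not code in this file):
 *
 *   frontend_bound  = fetch_bubbles / total_slots
 *   bad_speculation = (slots_issued - slots_retired + recovery_bubbles)
 *                       / total_slots
 *   retiring        = slots_retired / total_slots
 *   backend_bound   = 1 - frontend_bound - bad_speculation - retiring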
295 EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
296 "event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */
297 "event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */
298 EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
299 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
300 "event=0xe,umask=0x1"); /* uops_issued.any */
301 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
302 "event=0xc2,umask=0x2"); /* uops_retired.retire_slots */
303 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
304 "event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */
305 EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
306 "event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */
307 "event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */
308 EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
311 static struct attribute *snb_events_attrs[] = {
312 EVENT_PTR(td_slots_issued),
313 EVENT_PTR(td_slots_retired),
314 EVENT_PTR(td_fetch_bubbles),
315 EVENT_PTR(td_total_slots),
316 EVENT_PTR(td_total_slots_scale),
317 EVENT_PTR(td_recovery_bubbles),
318 EVENT_PTR(td_recovery_bubbles_scale),
322 static struct attribute *snb_mem_events_attrs[] = {
323 EVENT_PTR(mem_ld_snb),
324 EVENT_PTR(mem_st_snb),
328 static struct event_constraint intel_hsw_event_constraints[] = {
329 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
330 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
331 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
332 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
333 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
334 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
335 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
336 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
337 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
338 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
339 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
340 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
343 * When HT is off these events can only run on the bottom 4 counters
344 * When HT is on, they are impacted by the HT bug and require EXCL access
346 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
347 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
348 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
349 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
354 static struct event_constraint intel_bdw_event_constraints[] = {
355 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
356 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
357 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
358 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
359 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
361 * when HT is off, these can only run on the bottom 4 counters
363 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
364 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
365 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
366 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
370 static u64 intel_pmu_event_map(int hw_event)
372 return intel_perfmon_event_map[hw_event];
376 * Notes on the events:
377 * - data reads do not include code reads (comparable to earlier tables)
378 * - data counts include speculative execution (except L1 write, dtlb, bpu)
379 * - remote node access includes remote memory, remote cache, remote mmio.
380 * - prefetches are not included in the counts.
381 * - icache miss does not include decoded icache
384 #define SKL_DEMAND_DATA_RD BIT_ULL(0)
385 #define SKL_DEMAND_RFO BIT_ULL(1)
386 #define SKL_ANY_RESPONSE BIT_ULL(16)
387 #define SKL_SUPPLIER_NONE BIT_ULL(17)
388 #define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26)
389 #define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27)
390 #define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28)
391 #define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29)
392 #define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \
393 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
394 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
395 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
396 #define SKL_SPL_HIT BIT_ULL(30)
397 #define SKL_SNOOP_NONE BIT_ULL(31)
398 #define SKL_SNOOP_NOT_NEEDED BIT_ULL(32)
399 #define SKL_SNOOP_MISS BIT_ULL(33)
400 #define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34)
401 #define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35)
402 #define SKL_SNOOP_HITM BIT_ULL(36)
403 #define SKL_SNOOP_NON_DRAM BIT_ULL(37)
404 #define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \
405 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
406 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
407 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
408 #define SKL_DEMAND_READ SKL_DEMAND_DATA_RD
409 #define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \
410 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
411 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
412 SKL_SNOOP_HITM|SKL_SPL_HIT)
413 #define SKL_DEMAND_WRITE SKL_DEMAND_RFO
414 #define SKL_LLC_ACCESS SKL_ANY_RESPONSE
415 #define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
416 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
417 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
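/*
 * These bits form the MSR_OFFCORE_RSP_x payload for the OFFCORE_RESPONSE
 * events (0x01b7/0x01bb).  As an illustrative composition, the LLC
 * read-miss entry in skl_hw_cache_extra_regs below is
 * SKL_DEMAND_READ | SKL_L3_MISS | SKL_SUPPLIER_NONE | SKL_ANY_SNOOP:
 * demand data reads that missed the L3, regardless of snoop response.
 */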
419 static __initconst const u64 skl_hw_cache_event_ids
420 [PERF_COUNT_HW_CACHE_MAX]
421 [PERF_COUNT_HW_CACHE_OP_MAX]
422 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
426 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
427 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
430 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
431 [ C(RESULT_MISS) ] = 0x0,
433 [ C(OP_PREFETCH) ] = {
434 [ C(RESULT_ACCESS) ] = 0x0,
435 [ C(RESULT_MISS) ] = 0x0,
440 [ C(RESULT_ACCESS) ] = 0x0,
441 [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
444 [ C(RESULT_ACCESS) ] = -1,
445 [ C(RESULT_MISS) ] = -1,
447 [ C(OP_PREFETCH) ] = {
448 [ C(RESULT_ACCESS) ] = 0x0,
449 [ C(RESULT_MISS) ] = 0x0,
454 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
455 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
458 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
459 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
461 [ C(OP_PREFETCH) ] = {
462 [ C(RESULT_ACCESS) ] = 0x0,
463 [ C(RESULT_MISS) ] = 0x0,
468 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
469 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
472 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
473 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
475 [ C(OP_PREFETCH) ] = {
476 [ C(RESULT_ACCESS) ] = 0x0,
477 [ C(RESULT_MISS) ] = 0x0,
482 [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
483 [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
486 [ C(RESULT_ACCESS) ] = -1,
487 [ C(RESULT_MISS) ] = -1,
489 [ C(OP_PREFETCH) ] = {
490 [ C(RESULT_ACCESS) ] = -1,
491 [ C(RESULT_MISS) ] = -1,
496 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
497 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
500 [ C(RESULT_ACCESS) ] = -1,
501 [ C(RESULT_MISS) ] = -1,
503 [ C(OP_PREFETCH) ] = {
504 [ C(RESULT_ACCESS) ] = -1,
505 [ C(RESULT_MISS) ] = -1,
510 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
511 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
514 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
515 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
517 [ C(OP_PREFETCH) ] = {
518 [ C(RESULT_ACCESS) ] = 0x0,
519 [ C(RESULT_MISS) ] = 0x0,
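/*
 * Convention used by the hw_cache tables (descriptive note): an entry of
 * 0 means no suitable event exists (the request fails with -ENOENT), -1
 * marks an op/result combination the hardware cannot count (-EINVAL),
 * and 0x1b7 selects OFFCORE_RESPONSE, which only works together with the
 * matching payload from the *_hw_cache_extra_regs table programmed into
 * MSR_OFFCORE_RSP_x.
 */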
524 static __initconst const u64 skl_hw_cache_extra_regs
525 [PERF_COUNT_HW_CACHE_MAX]
526 [PERF_COUNT_HW_CACHE_OP_MAX]
527 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
531 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
532 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
533 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
534 SKL_L3_MISS|SKL_ANY_SNOOP|
538 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
539 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
540 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
541 SKL_L3_MISS|SKL_ANY_SNOOP|
544 [ C(OP_PREFETCH) ] = {
545 [ C(RESULT_ACCESS) ] = 0x0,
546 [ C(RESULT_MISS) ] = 0x0,
551 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
552 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
553 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
554 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
557 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
558 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
559 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
560 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
562 [ C(OP_PREFETCH) ] = {
563 [ C(RESULT_ACCESS) ] = 0x0,
564 [ C(RESULT_MISS) ] = 0x0,
569 #define SNB_DMND_DATA_RD (1ULL << 0)
570 #define SNB_DMND_RFO (1ULL << 1)
571 #define SNB_DMND_IFETCH (1ULL << 2)
572 #define SNB_DMND_WB (1ULL << 3)
573 #define SNB_PF_DATA_RD (1ULL << 4)
574 #define SNB_PF_RFO (1ULL << 5)
575 #define SNB_PF_IFETCH (1ULL << 6)
576 #define SNB_LLC_DATA_RD (1ULL << 7)
577 #define SNB_LLC_RFO (1ULL << 8)
578 #define SNB_LLC_IFETCH (1ULL << 9)
579 #define SNB_BUS_LOCKS (1ULL << 10)
580 #define SNB_STRM_ST (1ULL << 11)
581 #define SNB_OTHER (1ULL << 15)
582 #define SNB_RESP_ANY (1ULL << 16)
583 #define SNB_NO_SUPP (1ULL << 17)
584 #define SNB_LLC_HITM (1ULL << 18)
585 #define SNB_LLC_HITE (1ULL << 19)
586 #define SNB_LLC_HITS (1ULL << 20)
587 #define SNB_LLC_HITF (1ULL << 21)
588 #define SNB_LOCAL (1ULL << 22)
589 #define SNB_REMOTE (0xffULL << 23)
590 #define SNB_SNP_NONE (1ULL << 31)
591 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
592 #define SNB_SNP_MISS (1ULL << 33)
593 #define SNB_NO_FWD (1ULL << 34)
594 #define SNB_SNP_FWD (1ULL << 35)
595 #define SNB_HITM (1ULL << 36)
596 #define SNB_NON_DRAM (1ULL << 37)
598 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
599 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
600 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
602 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
603 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
606 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
607 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
609 #define SNB_L3_ACCESS SNB_RESP_ANY
610 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
612 static __initconst const u64 snb_hw_cache_extra_regs
613 [PERF_COUNT_HW_CACHE_MAX]
614 [PERF_COUNT_HW_CACHE_OP_MAX]
615 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
619 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
620 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
623 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
624 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
626 [ C(OP_PREFETCH) ] = {
627 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
628 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
633 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
634 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
637 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
638 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
640 [ C(OP_PREFETCH) ] = {
641 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
642 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
647 static __initconst const u64 snb_hw_cache_event_ids
648 [PERF_COUNT_HW_CACHE_MAX]
649 [PERF_COUNT_HW_CACHE_OP_MAX]
650 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
654 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
655 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
658 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
659 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
661 [ C(OP_PREFETCH) ] = {
662 [ C(RESULT_ACCESS) ] = 0x0,
663 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
668 [ C(RESULT_ACCESS) ] = 0x0,
669 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
672 [ C(RESULT_ACCESS) ] = -1,
673 [ C(RESULT_MISS) ] = -1,
675 [ C(OP_PREFETCH) ] = {
676 [ C(RESULT_ACCESS) ] = 0x0,
677 [ C(RESULT_MISS) ] = 0x0,
682 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
683 [ C(RESULT_ACCESS) ] = 0x01b7,
684 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
685 [ C(RESULT_MISS) ] = 0x01b7,
688 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
689 [ C(RESULT_ACCESS) ] = 0x01b7,
690 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
691 [ C(RESULT_MISS) ] = 0x01b7,
693 [ C(OP_PREFETCH) ] = {
694 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
695 [ C(RESULT_ACCESS) ] = 0x01b7,
696 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
697 [ C(RESULT_MISS) ] = 0x01b7,
702 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
703 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
706 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
707 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
709 [ C(OP_PREFETCH) ] = {
710 [ C(RESULT_ACCESS) ] = 0x0,
711 [ C(RESULT_MISS) ] = 0x0,
716 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
717 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
720 [ C(RESULT_ACCESS) ] = -1,
721 [ C(RESULT_MISS) ] = -1,
723 [ C(OP_PREFETCH) ] = {
724 [ C(RESULT_ACCESS) ] = -1,
725 [ C(RESULT_MISS) ] = -1,
730 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
731 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
734 [ C(RESULT_ACCESS) ] = -1,
735 [ C(RESULT_MISS) ] = -1,
737 [ C(OP_PREFETCH) ] = {
738 [ C(RESULT_ACCESS) ] = -1,
739 [ C(RESULT_MISS) ] = -1,
744 [ C(RESULT_ACCESS) ] = 0x01b7,
745 [ C(RESULT_MISS) ] = 0x01b7,
748 [ C(RESULT_ACCESS) ] = 0x01b7,
749 [ C(RESULT_MISS) ] = 0x01b7,
751 [ C(OP_PREFETCH) ] = {
752 [ C(RESULT_ACCESS) ] = 0x01b7,
753 [ C(RESULT_MISS) ] = 0x01b7,
760 * Notes on the events:
761 * - data reads do not include code reads (comparable to earlier tables)
762 * - data counts include speculative execution (except L1 write, dtlb, bpu)
763 * - remote node access includes remote memory, remote cache, remote mmio.
764 * - prefetches are not included in the counts because they are not reliable.
768 #define HSW_DEMAND_DATA_RD BIT_ULL(0)
769 #define HSW_DEMAND_RFO BIT_ULL(1)
770 #define HSW_ANY_RESPONSE BIT_ULL(16)
771 #define HSW_SUPPLIER_NONE BIT_ULL(17)
772 #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
773 #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
774 #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
775 #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
776 #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
777 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
778 HSW_L3_MISS_REMOTE_HOP2P)
779 #define HSW_SNOOP_NONE BIT_ULL(31)
780 #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
781 #define HSW_SNOOP_MISS BIT_ULL(33)
782 #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
783 #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
784 #define HSW_SNOOP_HITM BIT_ULL(36)
785 #define HSW_SNOOP_NON_DRAM BIT_ULL(37)
786 #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
787 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
788 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
789 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
790 #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
791 #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
792 #define HSW_DEMAND_WRITE HSW_DEMAND_RFO
793 #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
794 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
795 #define HSW_LLC_ACCESS HSW_ANY_RESPONSE
797 #define BDW_L3_MISS_LOCAL BIT(26)
798 #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
799 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
800 HSW_L3_MISS_REMOTE_HOP2P)
803 static __initconst const u64 hsw_hw_cache_event_ids
804 [PERF_COUNT_HW_CACHE_MAX]
805 [PERF_COUNT_HW_CACHE_OP_MAX]
806 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
810 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
811 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
814 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
815 [ C(RESULT_MISS) ] = 0x0,
817 [ C(OP_PREFETCH) ] = {
818 [ C(RESULT_ACCESS) ] = 0x0,
819 [ C(RESULT_MISS) ] = 0x0,
824 [ C(RESULT_ACCESS) ] = 0x0,
825 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
828 [ C(RESULT_ACCESS) ] = -1,
829 [ C(RESULT_MISS) ] = -1,
831 [ C(OP_PREFETCH) ] = {
832 [ C(RESULT_ACCESS) ] = 0x0,
833 [ C(RESULT_MISS) ] = 0x0,
838 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
839 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
842 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
843 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
845 [ C(OP_PREFETCH) ] = {
846 [ C(RESULT_ACCESS) ] = 0x0,
847 [ C(RESULT_MISS) ] = 0x0,
852 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
853 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
856 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
857 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
859 [ C(OP_PREFETCH) ] = {
860 [ C(RESULT_ACCESS) ] = 0x0,
861 [ C(RESULT_MISS) ] = 0x0,
866 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
867 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
870 [ C(RESULT_ACCESS) ] = -1,
871 [ C(RESULT_MISS) ] = -1,
873 [ C(OP_PREFETCH) ] = {
874 [ C(RESULT_ACCESS) ] = -1,
875 [ C(RESULT_MISS) ] = -1,
880 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
881 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
884 [ C(RESULT_ACCESS) ] = -1,
885 [ C(RESULT_MISS) ] = -1,
887 [ C(OP_PREFETCH) ] = {
888 [ C(RESULT_ACCESS) ] = -1,
889 [ C(RESULT_MISS) ] = -1,
894 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
895 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
898 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
899 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
901 [ C(OP_PREFETCH) ] = {
902 [ C(RESULT_ACCESS) ] = 0x0,
903 [ C(RESULT_MISS) ] = 0x0,
908 static __initconst const u64 hsw_hw_cache_extra_regs
909 [PERF_COUNT_HW_CACHE_MAX]
910 [PERF_COUNT_HW_CACHE_OP_MAX]
911 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
915 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
917 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
918 HSW_L3_MISS|HSW_ANY_SNOOP,
921 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
923 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
924 HSW_L3_MISS|HSW_ANY_SNOOP,
926 [ C(OP_PREFETCH) ] = {
927 [ C(RESULT_ACCESS) ] = 0x0,
928 [ C(RESULT_MISS) ] = 0x0,
933 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
934 HSW_L3_MISS_LOCAL_DRAM|
936 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
941 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
942 HSW_L3_MISS_LOCAL_DRAM|
944 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
948 [ C(OP_PREFETCH) ] = {
949 [ C(RESULT_ACCESS) ] = 0x0,
950 [ C(RESULT_MISS) ] = 0x0,
955 static __initconst const u64 westmere_hw_cache_event_ids
956 [PERF_COUNT_HW_CACHE_MAX]
957 [PERF_COUNT_HW_CACHE_OP_MAX]
958 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
962 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
963 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
966 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
967 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
969 [ C(OP_PREFETCH) ] = {
970 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
971 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
976 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
977 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
980 [ C(RESULT_ACCESS) ] = -1,
981 [ C(RESULT_MISS) ] = -1,
983 [ C(OP_PREFETCH) ] = {
984 [ C(RESULT_ACCESS) ] = 0x0,
985 [ C(RESULT_MISS) ] = 0x0,
990 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
991 [ C(RESULT_ACCESS) ] = 0x01b7,
992 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
993 [ C(RESULT_MISS) ] = 0x01b7,
996 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
1000 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1001 [ C(RESULT_ACCESS) ] = 0x01b7,
1002 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1003 [ C(RESULT_MISS) ] = 0x01b7,
1005 [ C(OP_PREFETCH) ] = {
1006 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1007 [ C(RESULT_ACCESS) ] = 0x01b7,
1008 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1009 [ C(RESULT_MISS) ] = 0x01b7,
1014 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1015 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1018 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1019 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1021 [ C(OP_PREFETCH) ] = {
1022 [ C(RESULT_ACCESS) ] = 0x0,
1023 [ C(RESULT_MISS) ] = 0x0,
1028 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1029 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
1032 [ C(RESULT_ACCESS) ] = -1,
1033 [ C(RESULT_MISS) ] = -1,
1035 [ C(OP_PREFETCH) ] = {
1036 [ C(RESULT_ACCESS) ] = -1,
1037 [ C(RESULT_MISS) ] = -1,
1042 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1043 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1046 [ C(RESULT_ACCESS) ] = -1,
1047 [ C(RESULT_MISS) ] = -1,
1049 [ C(OP_PREFETCH) ] = {
1050 [ C(RESULT_ACCESS) ] = -1,
1051 [ C(RESULT_MISS) ] = -1,
1056 [ C(RESULT_ACCESS) ] = 0x01b7,
1057 [ C(RESULT_MISS) ] = 0x01b7,
1060 [ C(RESULT_ACCESS) ] = 0x01b7,
1061 [ C(RESULT_MISS) ] = 0x01b7,
1063 [ C(OP_PREFETCH) ] = {
1064 [ C(RESULT_ACCESS) ] = 0x01b7,
1065 [ C(RESULT_MISS) ] = 0x01b7,
1071 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1072 * See IA32 SDM Vol 3B, Section 30.6.1.3.
1075 #define NHM_DMND_DATA_RD (1 << 0)
1076 #define NHM_DMND_RFO (1 << 1)
1077 #define NHM_DMND_IFETCH (1 << 2)
1078 #define NHM_DMND_WB (1 << 3)
1079 #define NHM_PF_DATA_RD (1 << 4)
1080 #define NHM_PF_DATA_RFO (1 << 5)
1081 #define NHM_PF_IFETCH (1 << 6)
1082 #define NHM_OFFCORE_OTHER (1 << 7)
1083 #define NHM_UNCORE_HIT (1 << 8)
1084 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
1085 #define NHM_OTHER_CORE_HITM (1 << 10)
1087 #define NHM_REMOTE_CACHE_FWD (1 << 12)
1088 #define NHM_REMOTE_DRAM (1 << 13)
1089 #define NHM_LOCAL_DRAM (1 << 14)
1090 #define NHM_NON_DRAM (1 << 15)
1092 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1093 #define NHM_REMOTE (NHM_REMOTE_DRAM)
1095 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
1096 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
1097 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1099 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1100 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1101 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
1103 static __initconst const u64 nehalem_hw_cache_extra_regs
1104 [PERF_COUNT_HW_CACHE_MAX]
1105 [PERF_COUNT_HW_CACHE_OP_MAX]
1106 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1110 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1111 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
1114 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1115 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
1117 [ C(OP_PREFETCH) ] = {
1118 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1119 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1124 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1125 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
1128 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1129 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
1131 [ C(OP_PREFETCH) ] = {
1132 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1133 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1138 static __initconst const u64 nehalem_hw_cache_event_ids
1139 [PERF_COUNT_HW_CACHE_MAX]
1140 [PERF_COUNT_HW_CACHE_OP_MAX]
1141 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1145 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1146 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1149 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1150 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1152 [ C(OP_PREFETCH) ] = {
1153 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1154 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1159 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1160 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1163 [ C(RESULT_ACCESS) ] = -1,
1164 [ C(RESULT_MISS) ] = -1,
1166 [ C(OP_PREFETCH) ] = {
1167 [ C(RESULT_ACCESS) ] = 0x0,
1168 [ C(RESULT_MISS) ] = 0x0,
1173 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1174 [ C(RESULT_ACCESS) ] = 0x01b7,
1175 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1176 [ C(RESULT_MISS) ] = 0x01b7,
1179 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
1183 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1184 [ C(RESULT_ACCESS) ] = 0x01b7,
1185 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1186 [ C(RESULT_MISS) ] = 0x01b7,
1188 [ C(OP_PREFETCH) ] = {
1189 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1190 [ C(RESULT_ACCESS) ] = 0x01b7,
1191 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1192 [ C(RESULT_MISS) ] = 0x01b7,
1197 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1198 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1201 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1202 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1204 [ C(OP_PREFETCH) ] = {
1205 [ C(RESULT_ACCESS) ] = 0x0,
1206 [ C(RESULT_MISS) ] = 0x0,
1211 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1212 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
1215 [ C(RESULT_ACCESS) ] = -1,
1216 [ C(RESULT_MISS) ] = -1,
1218 [ C(OP_PREFETCH) ] = {
1219 [ C(RESULT_ACCESS) ] = -1,
1220 [ C(RESULT_MISS) ] = -1,
1225 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1226 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1229 [ C(RESULT_ACCESS) ] = -1,
1230 [ C(RESULT_MISS) ] = -1,
1232 [ C(OP_PREFETCH) ] = {
1233 [ C(RESULT_ACCESS) ] = -1,
1234 [ C(RESULT_MISS) ] = -1,
1239 [ C(RESULT_ACCESS) ] = 0x01b7,
1240 [ C(RESULT_MISS) ] = 0x01b7,
1243 [ C(RESULT_ACCESS) ] = 0x01b7,
1244 [ C(RESULT_MISS) ] = 0x01b7,
1246 [ C(OP_PREFETCH) ] = {
1247 [ C(RESULT_ACCESS) ] = 0x01b7,
1248 [ C(RESULT_MISS) ] = 0x01b7,
1253 static __initconst const u64 core2_hw_cache_event_ids
1254 [PERF_COUNT_HW_CACHE_MAX]
1255 [PERF_COUNT_HW_CACHE_OP_MAX]
1256 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1260 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
1261 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
1264 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
1265 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
1267 [ C(OP_PREFETCH) ] = {
1268 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
1269 [ C(RESULT_MISS) ] = 0,
1274 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
1275 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
1278 [ C(RESULT_ACCESS) ] = -1,
1279 [ C(RESULT_MISS) ] = -1,
1281 [ C(OP_PREFETCH) ] = {
1282 [ C(RESULT_ACCESS) ] = 0,
1283 [ C(RESULT_MISS) ] = 0,
1288 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1289 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1292 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1293 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1295 [ C(OP_PREFETCH) ] = {
1296 [ C(RESULT_ACCESS) ] = 0,
1297 [ C(RESULT_MISS) ] = 0,
1302 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1303 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
1306 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1307 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
1309 [ C(OP_PREFETCH) ] = {
1310 [ C(RESULT_ACCESS) ] = 0,
1311 [ C(RESULT_MISS) ] = 0,
1316 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1317 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
1320 [ C(RESULT_ACCESS) ] = -1,
1321 [ C(RESULT_MISS) ] = -1,
1323 [ C(OP_PREFETCH) ] = {
1324 [ C(RESULT_ACCESS) ] = -1,
1325 [ C(RESULT_MISS) ] = -1,
1330 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1331 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1334 [ C(RESULT_ACCESS) ] = -1,
1335 [ C(RESULT_MISS) ] = -1,
1337 [ C(OP_PREFETCH) ] = {
1338 [ C(RESULT_ACCESS) ] = -1,
1339 [ C(RESULT_MISS) ] = -1,
1344 static __initconst const u64 atom_hw_cache_event_ids
1345 [PERF_COUNT_HW_CACHE_MAX]
1346 [PERF_COUNT_HW_CACHE_OP_MAX]
1347 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1351 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
1352 [ C(RESULT_MISS) ] = 0,
1355 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
1356 [ C(RESULT_MISS) ] = 0,
1358 [ C(OP_PREFETCH) ] = {
1359 [ C(RESULT_ACCESS) ] = 0x0,
1360 [ C(RESULT_MISS) ] = 0,
1365 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1366 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1369 [ C(RESULT_ACCESS) ] = -1,
1370 [ C(RESULT_MISS) ] = -1,
1372 [ C(OP_PREFETCH) ] = {
1373 [ C(RESULT_ACCESS) ] = 0,
1374 [ C(RESULT_MISS) ] = 0,
1379 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1380 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1383 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1384 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1386 [ C(OP_PREFETCH) ] = {
1387 [ C(RESULT_ACCESS) ] = 0,
1388 [ C(RESULT_MISS) ] = 0,
1393 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
1394 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
1397 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
1398 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
1400 [ C(OP_PREFETCH) ] = {
1401 [ C(RESULT_ACCESS) ] = 0,
1402 [ C(RESULT_MISS) ] = 0,
1407 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1408 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1411 [ C(RESULT_ACCESS) ] = -1,
1412 [ C(RESULT_MISS) ] = -1,
1414 [ C(OP_PREFETCH) ] = {
1415 [ C(RESULT_ACCESS) ] = -1,
1416 [ C(RESULT_MISS) ] = -1,
1421 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1422 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1425 [ C(RESULT_ACCESS) ] = -1,
1426 [ C(RESULT_MISS) ] = -1,
1428 [ C(OP_PREFETCH) ] = {
1429 [ C(RESULT_ACCESS) ] = -1,
1430 [ C(RESULT_MISS) ] = -1,
1435 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1436 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1437 /* no_alloc_cycles.not_delivered */
1438 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1439 "event=0xca,umask=0x50");
1440 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1441 /* uops_retired.all */
1442 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1443 "event=0xc2,umask=0x10");
1444 /* uops_retired.all */
1445 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1446 "event=0xc2,umask=0x10");
1448 static struct attribute *slm_events_attrs[] = {
1449 EVENT_PTR(td_total_slots_slm),
1450 EVENT_PTR(td_total_slots_scale_slm),
1451 EVENT_PTR(td_fetch_bubbles_slm),
1452 EVENT_PTR(td_fetch_bubbles_scale_slm),
1453 EVENT_PTR(td_slots_issued_slm),
1454 EVENT_PTR(td_slots_retired_slm),
1458 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1460 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1461 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1462 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1466 #define SLM_DMND_READ SNB_DMND_DATA_RD
1467 #define SLM_DMND_WRITE SNB_DMND_RFO
1468 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1470 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1471 #define SLM_LLC_ACCESS SNB_RESP_ANY
1472 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
1474 static __initconst const u64 slm_hw_cache_extra_regs
1475 [PERF_COUNT_HW_CACHE_MAX]
1476 [PERF_COUNT_HW_CACHE_OP_MAX]
1477 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1481 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1482 [ C(RESULT_MISS) ] = 0,
1485 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1486 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1488 [ C(OP_PREFETCH) ] = {
1489 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1490 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1495 static __initconst const u64 slm_hw_cache_event_ids
1496 [PERF_COUNT_HW_CACHE_MAX]
1497 [PERF_COUNT_HW_CACHE_OP_MAX]
1498 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1502 [ C(RESULT_ACCESS) ] = 0,
1503 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
1506 [ C(RESULT_ACCESS) ] = 0,
1507 [ C(RESULT_MISS) ] = 0,
1509 [ C(OP_PREFETCH) ] = {
1510 [ C(RESULT_ACCESS) ] = 0,
1511 [ C(RESULT_MISS) ] = 0,
1516 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1517 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
1520 [ C(RESULT_ACCESS) ] = -1,
1521 [ C(RESULT_MISS) ] = -1,
1523 [ C(OP_PREFETCH) ] = {
1524 [ C(RESULT_ACCESS) ] = 0,
1525 [ C(RESULT_MISS) ] = 0,
1530 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1531 [ C(RESULT_ACCESS) ] = 0x01b7,
1532 [ C(RESULT_MISS) ] = 0,
1535 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1536 [ C(RESULT_ACCESS) ] = 0x01b7,
1537 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1538 [ C(RESULT_MISS) ] = 0x01b7,
1540 [ C(OP_PREFETCH) ] = {
1541 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1542 [ C(RESULT_ACCESS) ] = 0x01b7,
1543 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1544 [ C(RESULT_MISS) ] = 0x01b7,
1549 [ C(RESULT_ACCESS) ] = 0,
1550 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
1553 [ C(RESULT_ACCESS) ] = 0,
1554 [ C(RESULT_MISS) ] = 0,
1556 [ C(OP_PREFETCH) ] = {
1557 [ C(RESULT_ACCESS) ] = 0,
1558 [ C(RESULT_MISS) ] = 0,
1563 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1564 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1567 [ C(RESULT_ACCESS) ] = -1,
1568 [ C(RESULT_MISS) ] = -1,
1570 [ C(OP_PREFETCH) ] = {
1571 [ C(RESULT_ACCESS) ] = -1,
1572 [ C(RESULT_MISS) ] = -1,
1577 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1578 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1581 [ C(RESULT_ACCESS) ] = -1,
1582 [ C(RESULT_MISS) ] = -1,
1584 [ C(OP_PREFETCH) ] = {
1585 [ C(RESULT_ACCESS) ] = -1,
1586 [ C(RESULT_MISS) ] = -1,
1591 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1592 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1593 /* UOPS_NOT_DELIVERED.ANY */
1594 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1595 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1596 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1597 /* UOPS_RETIRED.ANY */
1598 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1599 /* UOPS_ISSUED.ANY */
1600 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1602 static struct attribute *glm_events_attrs[] = {
1603 EVENT_PTR(td_total_slots_glm),
1604 EVENT_PTR(td_total_slots_scale_glm),
1605 EVENT_PTR(td_fetch_bubbles_glm),
1606 EVENT_PTR(td_recovery_bubbles_glm),
1607 EVENT_PTR(td_slots_issued_glm),
1608 EVENT_PTR(td_slots_retired_glm),
1612 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1613 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1614 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1615 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1619 #define GLM_DEMAND_DATA_RD BIT_ULL(0)
1620 #define GLM_DEMAND_RFO BIT_ULL(1)
1621 #define GLM_ANY_RESPONSE BIT_ULL(16)
1622 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33)
1623 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD
1624 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO
1625 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1626 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE
1627 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1628 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM)
1630 static __initconst const u64 glm_hw_cache_event_ids
1631 [PERF_COUNT_HW_CACHE_MAX]
1632 [PERF_COUNT_HW_CACHE_OP_MAX]
1633 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1636 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1637 [C(RESULT_MISS)] = 0x0,
1640 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1641 [C(RESULT_MISS)] = 0x0,
1643 [C(OP_PREFETCH)] = {
1644 [C(RESULT_ACCESS)] = 0x0,
1645 [C(RESULT_MISS)] = 0x0,
1650 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1651 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1654 [C(RESULT_ACCESS)] = -1,
1655 [C(RESULT_MISS)] = -1,
1657 [C(OP_PREFETCH)] = {
1658 [C(RESULT_ACCESS)] = 0x0,
1659 [C(RESULT_MISS)] = 0x0,
1664 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1665 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1668 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1669 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1671 [C(OP_PREFETCH)] = {
1672 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1673 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1678 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1679 [C(RESULT_MISS)] = 0x0,
1682 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1683 [C(RESULT_MISS)] = 0x0,
1685 [C(OP_PREFETCH)] = {
1686 [C(RESULT_ACCESS)] = 0x0,
1687 [C(RESULT_MISS)] = 0x0,
1692 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1693 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1696 [C(RESULT_ACCESS)] = -1,
1697 [C(RESULT_MISS)] = -1,
1699 [C(OP_PREFETCH)] = {
1700 [C(RESULT_ACCESS)] = -1,
1701 [C(RESULT_MISS)] = -1,
1706 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1707 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1710 [C(RESULT_ACCESS)] = -1,
1711 [C(RESULT_MISS)] = -1,
1713 [C(OP_PREFETCH)] = {
1714 [C(RESULT_ACCESS)] = -1,
1715 [C(RESULT_MISS)] = -1,
1720 static __initconst const u64 glm_hw_cache_extra_regs
1721 [PERF_COUNT_HW_CACHE_MAX]
1722 [PERF_COUNT_HW_CACHE_OP_MAX]
1723 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1726 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
1728 [C(RESULT_MISS)] = GLM_DEMAND_READ|
1732 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
1734 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
1737 [C(OP_PREFETCH)] = {
1738 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|
1740 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|
1746 static __initconst const u64 glp_hw_cache_event_ids
1747 [PERF_COUNT_HW_CACHE_MAX]
1748 [PERF_COUNT_HW_CACHE_OP_MAX]
1749 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1752 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1753 [C(RESULT_MISS)] = 0x0,
1756 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1757 [C(RESULT_MISS)] = 0x0,
1759 [C(OP_PREFETCH)] = {
1760 [C(RESULT_ACCESS)] = 0x0,
1761 [C(RESULT_MISS)] = 0x0,
1766 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1767 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1770 [C(RESULT_ACCESS)] = -1,
1771 [C(RESULT_MISS)] = -1,
1773 [C(OP_PREFETCH)] = {
1774 [C(RESULT_ACCESS)] = 0x0,
1775 [C(RESULT_MISS)] = 0x0,
1780 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1781 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1784 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1785 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1787 [C(OP_PREFETCH)] = {
1788 [C(RESULT_ACCESS)] = 0x0,
1789 [C(RESULT_MISS)] = 0x0,
1794 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1795 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
1798 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1799 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
1801 [C(OP_PREFETCH)] = {
1802 [C(RESULT_ACCESS)] = 0x0,
1803 [C(RESULT_MISS)] = 0x0,
1808 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1809 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1812 [C(RESULT_ACCESS)] = -1,
1813 [C(RESULT_MISS)] = -1,
1815 [C(OP_PREFETCH)] = {
1816 [C(RESULT_ACCESS)] = -1,
1817 [C(RESULT_MISS)] = -1,
1822 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1823 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1826 [C(RESULT_ACCESS)] = -1,
1827 [C(RESULT_MISS)] = -1,
1829 [C(OP_PREFETCH)] = {
1830 [C(RESULT_ACCESS)] = -1,
1831 [C(RESULT_MISS)] = -1,
1836 static __initconst const u64 glp_hw_cache_extra_regs
1837 [PERF_COUNT_HW_CACHE_MAX]
1838 [PERF_COUNT_HW_CACHE_OP_MAX]
1839 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1842 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
1844 [C(RESULT_MISS)] = GLM_DEMAND_READ|
1848 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
1850 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
1853 [C(OP_PREFETCH)] = {
1854 [C(RESULT_ACCESS)] = 0x0,
1855 [C(RESULT_MISS)] = 0x0,
1860 #define TNT_LOCAL_DRAM BIT_ULL(26)
1861 #define TNT_DEMAND_READ GLM_DEMAND_DATA_RD
1862 #define TNT_DEMAND_WRITE GLM_DEMAND_RFO
1863 #define TNT_LLC_ACCESS GLM_ANY_RESPONSE
1864 #define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
1865 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
1866 #define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
1868 static __initconst const u64 tnt_hw_cache_extra_regs
1869 [PERF_COUNT_HW_CACHE_MAX]
1870 [PERF_COUNT_HW_CACHE_OP_MAX]
1871 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1874 [C(RESULT_ACCESS)] = TNT_DEMAND_READ|
1876 [C(RESULT_MISS)] = TNT_DEMAND_READ|
1880 [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE|
1882 [C(RESULT_MISS)] = TNT_DEMAND_WRITE|
1885 [C(OP_PREFETCH)] = {
1886 [C(RESULT_ACCESS)] = 0x0,
1887 [C(RESULT_MISS)] = 0x0,
1892 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
1893 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1894 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0),
1895 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xffffff9fffull, RSP_1),
1899 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
1900 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
1901 #define KNL_MCDRAM_LOCAL BIT_ULL(21)
1902 #define KNL_MCDRAM_FAR BIT_ULL(22)
1903 #define KNL_DDR_LOCAL BIT_ULL(23)
1904 #define KNL_DDR_FAR BIT_ULL(24)
1905 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
1906 KNL_DDR_LOCAL | KNL_DDR_FAR)
1907 #define KNL_L2_READ SLM_DMND_READ
1908 #define KNL_L2_WRITE SLM_DMND_WRITE
1909 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH
1910 #define KNL_L2_ACCESS SLM_LLC_ACCESS
1911 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
1912 KNL_DRAM_ANY | SNB_SNP_ANY | \
1915 static __initconst const u64 knl_hw_cache_extra_regs
1916 [PERF_COUNT_HW_CACHE_MAX]
1917 [PERF_COUNT_HW_CACHE_OP_MAX]
1918 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1921 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
1922 [C(RESULT_MISS)] = 0,
1925 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
1926 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS,
1928 [C(OP_PREFETCH)] = {
1929 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
1930 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS,
1936 * Used from PMIs where the LBRs are already disabled.
1938 * This function could be called consecutively. It is required to remain in
1939 * disabled state if called consecutively.
1941 * During consecutive calls, the same disable value will be written to related
1942 * registers, so the PMU state remains unchanged.
1944 * intel_bts events don't coexist with intel PMU's BTS events because of
1945 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
1946 * disabled around intel PMU's event batching etc, only inside the PMI handler.
1948 static void __intel_pmu_disable_all(void)
1950 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1952 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1954 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1955 intel_pmu_disable_bts();
1957 intel_pmu_pebs_disable_all();
1960 static void intel_pmu_disable_all(void)
1962 __intel_pmu_disable_all();
1963 intel_pmu_lbr_disable_all();
1966 static void __intel_pmu_enable_all(int added, bool pmi)
1968 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1970 intel_pmu_pebs_enable_all();
1971 intel_pmu_lbr_enable_all(pmi);
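/*
 * Descriptive note: counters flagged in intel_ctrl_guest_mask should
 * only count while a KVM guest runs, so they are masked out of
 * GLOBAL_CTRL on the host; the guest-side value is provided via
 * intel_guest_get_msrs() at VM entry.
 */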
1972 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
1973 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
1975 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1976 struct perf_event *event =
1977 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
1979 if (WARN_ON_ONCE(!event))
1982 intel_pmu_enable_bts(event->hw.config);
1986 static void intel_pmu_enable_all(int added)
1988 __intel_pmu_enable_all(added, false);
1993 * Intel Errata AAK100 (model 26)
1994 * Intel Errata AAP53 (model 30)
1995 * Intel Errata BD53 (model 44)
1997 * The official story:
1998 * These chips need to be 'reset' when adding counters by programming the
1999 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2000 * in sequence on the same PMC or on different PMCs.
2002 * In practice it appears some of these events do in fact count, and
2003 * we need to program all 4 events.
2005 static void intel_pmu_nhm_workaround(void)
2007 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2008 static const unsigned long nhm_magic[4] = {
2014 struct perf_event *event;
2018 * The erratum requires the following steps:
2019 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2020 * 2) Configure 4 PERFEVTSELx with the magic events and clear
2021 * the corresponding PMCx;
2022 * 3) Set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
2023 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2024 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
2028 * The real steps we choose are a little different from above.
2029 * A) To reduce MSR operations, we don't run step 1); those registers
2030 * are already cleared before this function is called;
2031 * B) Call x86_perf_event_update to save PMCx before configuring
2032 * PERFEVTSELx with magic number;
2033 * C) For step 5), we clear a pair only when its PERFEVTSELx is
2034 * not currently in use.
2035 * D) Call x86_perf_event_set_period to restore PMCx;
2038 /* We always operate on 4 pairs of PERF counters */
2039 for (i = 0; i < 4; i++) {
2040 event = cpuc->events[i];
2042 x86_perf_event_update(event);
2045 for (i = 0; i < 4; i++) {
2046 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2047 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
2050 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2051 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2053 for (i = 0; i < 4; i++) {
2054 event = cpuc->events[i];
2057 x86_perf_event_set_period(event);
2058 __x86_pmu_enable_event(&event->hw,
2059 ARCH_PERFMON_EVENTSEL_ENABLE);
2061 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2065 static void intel_pmu_nhm_enable_all(int added)
2068 intel_pmu_nhm_workaround();
2069 intel_pmu_enable_all(added);
2072 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2074 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2076 if (cpuc->tfa_shadow != val) {
2077 cpuc->tfa_shadow = val;
2078 wrmsrl(MSR_TSX_FORCE_ABORT, val);
2082 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2085 * We're going to use PMC3, make sure TFA is set before we touch it.
2088 intel_set_tfa(cpuc, true);
2091 static void intel_tfa_pmu_enable_all(int added)
2093 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2096 * If PMC3 is no longer in use when we enable the PMU, we can clear TFA.
2099 if (!test_bit(3, cpuc->active_mask))
2100 intel_set_tfa(cpuc, false);
2102 intel_pmu_enable_all(added);
2105 static void enable_counter_freeze(void)
2107 update_debugctlmsr(get_debugctlmsr() |
2108 DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
2111 static void disable_counter_freeze(void)
2113 update_debugctlmsr(get_debugctlmsr() &
2114 ~DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
2117 static inline u64 intel_pmu_get_status(void)
2121 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2126 static inline void intel_pmu_ack_status(u64 ack)
2128 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2131 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
2133 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2136 mask = 0xfULL << (idx * 4);
2138 rdmsrl(hwc->config_base, ctrl_val);
2140 wrmsrl(hwc->config_base, ctrl_val);
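/*
 * Example: for fixed counter 1 (idx == 1), mask is 0xf << 4 == 0xf0.
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL packs one 4-bit control field per fixed
 * counter, so clearing that nibble stops counter 1 without disturbing the
 * control bits of the other fixed counters.
 */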
2143 static inline bool event_is_checkpointed(struct perf_event *event)
2145 return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2148 static void intel_pmu_disable_event(struct perf_event *event)
2150 struct hw_perf_event *hwc = &event->hw;
2151 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2153 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2154 intel_pmu_disable_bts();
2155 intel_pmu_drain_bts_buffer();
2159 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
2160 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
2161 cpuc->intel_cp_status &= ~(1ull << hwc->idx);
2163 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
2164 intel_pmu_disable_fixed(hwc);
2168 x86_pmu_disable_event(event);
2171 * Needs to be called after x86_pmu_disable_event,
2172 * so we don't trigger the event without PEBS bit set.
2174 if (unlikely(event->attr.precise_ip))
2175 intel_pmu_pebs_disable(event);
2178 static void intel_pmu_del_event(struct perf_event *event)
2180 if (needs_branch_stack(event))
2181 intel_pmu_lbr_del(event);
2182 if (event->attr.precise_ip)
2183 intel_pmu_pebs_del(event);
2186 static void intel_pmu_read_event(struct perf_event *event)
2188 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2189 intel_pmu_auto_reload_read(event);
2191 x86_perf_event_update(event);
2194 static void intel_pmu_enable_fixed(struct perf_event *event)
2196 struct hw_perf_event *hwc = &event->hw;
2197 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2198 u64 ctrl_val, mask, bits = 0;
2201 * Enable IRQ generation (0x8), if not PEBS,
2202 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2205 if (!event->attr.precise_ip)
2207 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2209 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2213 * ANY bit is supported in v3 and up
2215 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2219 mask = 0xfULL << (idx * 4);
2221 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
2222 bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2223 mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2226 rdmsrl(hwc->config_base, ctrl_val);
2229 wrmsrl(hwc->config_base, ctrl_val);
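/*
 * Example: a non-PEBS event counting in both ring 0 and ring 3 on fixed
 * counter 0 ends up with bits == 0x8 | 0x2 | 0x1 == 0xb (PMI enable, user,
 * kernel) written into the low nibble of MSR_ARCH_PERFMON_FIXED_CTR_CTRL.
 */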
2232 static void intel_pmu_enable_event(struct perf_event *event)
2234 struct hw_perf_event *hwc = &event->hw;
2235 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2237 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2238 if (!__this_cpu_read(cpu_hw_events.enabled))
2241 intel_pmu_enable_bts(hwc->config);
2245 if (event->attr.exclude_host)
2246 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
2247 if (event->attr.exclude_guest)
2248 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
2250 if (unlikely(event_is_checkpointed(event)))
2251 cpuc->intel_cp_status |= (1ull << hwc->idx);
2253 if (unlikely(event->attr.precise_ip))
2254 intel_pmu_pebs_enable(event);
2256 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
2257 intel_pmu_enable_fixed(event);
2261 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2264 static void intel_pmu_add_event(struct perf_event *event)
2266 if (event->attr.precise_ip)
2267 intel_pmu_pebs_add(event);
2268 if (needs_branch_stack(event))
2269 intel_pmu_lbr_add(event);
2273 * Save and restart an expired event. Called by NMI contexts,
2274 * so it has to be careful about preempting normal event ops:
2276 int intel_pmu_save_and_restart(struct perf_event *event)
2278 x86_perf_event_update(event);
2280 * For a checkpointed counter always reset back to 0. This
2281 * avoids a situation where the counter overflows, aborts the
2282 * transaction and is then set back to shortly before the
2283 * overflow, and overflows and aborts again.
2285 if (unlikely(event_is_checkpointed(event))) {
2286 /* No race with NMIs because the counter should not be armed */
2287 wrmsrl(event->hw.event_base, 0);
2288 local64_set(&event->hw.prev_count, 0);
2290 return x86_perf_event_set_period(event);
2293 static void intel_pmu_reset(void)
2295 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2296 unsigned long flags;
2299 if (!x86_pmu.num_counters)
2302 local_irq_save(flags);
2304 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2306 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2307 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2308 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
2310 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
2311 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2314 ds->bts_index = ds->bts_buffer_base;
2316 /* Ack all overflows and disable fixed counters */
2317 if (x86_pmu.version >= 2) {
2318 intel_pmu_ack_status(intel_pmu_get_status());
2319 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2322 /* Reset LBRs and LBR freezing */
2323 if (x86_pmu.lbr_nr) {
2324 update_debugctlmsr(get_debugctlmsr() &
2325 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2328 local_irq_restore(flags);
2331 static int handle_pmi_common(struct pt_regs *regs, u64 status)
2333 struct perf_sample_data data;
2334 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2338 inc_irq_stat(apic_perf_irqs);
2341 * Ignore a range of extra bits in status that do not indicate
2342 * overflow by themselves.
2344 status &= ~(GLOBAL_STATUS_COND_CHG |
2345 GLOBAL_STATUS_ASIF |
2346 GLOBAL_STATUS_LBRS_FROZEN);
2350 * In case multiple PEBS events are sampled at the same time,
2351 * it is possible to have GLOBAL_STATUS bit 62 set indicating
2352 * PEBS buffer overflow and also seeing at most 3 PEBS counters
2353 * having their bits set in the status register. This is a sign
2354 * that there was at least one PEBS record pending at the time
2355 * of the PMU interrupt. PEBS counters must only be processed
2356 * via the drain_pebs() calls and not via the regular sample
2357 * processing loop that comes after it, otherwise
2358 * phony regular samples may be generated in the sampling buffer
2359 * not marked with the EXACT tag. Another possibility is to have
2360 * one PEBS event and at least one non-PEBS event which overflows
2361 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
2362 * not be set, yet the overflow status bit for the PEBS counter will still be set.
2365 * To avoid this problem, we systematically ignore the PEBS-enabled
2366 * counters from the GLOBAL_STATUS mask and we always process PEBS
2367 * events via drain_pebs().
2369 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
2370 status &= ~cpuc->pebs_enabled;
2372 status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
2375 * PEBS overflow sets bit 62 in the global status register
2377 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
2379 x86_pmu.drain_pebs(regs);
2380 status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
2386 if (__test_and_clear_bit(55, (unsigned long *)&status)) {
2388 if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
2389 perf_guest_cbs->handle_intel_pt_intr))
2390 perf_guest_cbs->handle_intel_pt_intr();
2392 intel_pt_interrupt();
2396 * Checkpointed counters can lead to 'spurious' PMIs because the
2397 * rollback caused by the PMI will have cleared the overflow status
2398 * bit. Therefore always force probe these counters.
2400 status |= cpuc->intel_cp_status;
2402 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2403 struct perf_event *event = cpuc->events[bit];
2407 if (!test_bit(bit, cpuc->active_mask))
2410 if (!intel_pmu_save_and_restart(event))
2413 perf_sample_data_init(&data, 0, event->hw.last_period);
2415 if (has_branch_stack(event))
2416 data.br_stack = &cpuc->lbr_stack;
2418 if (perf_event_overflow(event, &data, regs))
2419 x86_pmu_stop(event, 0);
2425 static bool disable_counter_freezing = true;
2426 static int __init intel_perf_counter_freezing_setup(char *s)
2430 if (kstrtobool(s, &res))
2433 disable_counter_freezing = !res;
2436 __setup("perf_v4_pmi=", intel_perf_counter_freezing_setup);
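/*
 * Example: booting with "perf_v4_pmi=1" sets disable_counter_freezing to
 * false, allowing the v4 counter-freezing handler to be used on capable
 * CPUs, while "perf_v4_pmi=0" (or omitting the parameter) leaves it
 * disabled.
 */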
2439 * Simplified handler for Arch Perfmon v4:
2440 * - We rely on counter freezing/unfreezing to enable/disable the PMU.
2441 * This is done automatically on PMU ack.
2442 * - Ack the PMU only after the APIC.
2445 static int intel_pmu_handle_irq_v4(struct pt_regs *regs)
2447 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2451 int pmu_enabled = cpuc->enabled;
2454 /* PMU has been disabled because of counter freezing */
2456 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2458 intel_bts_disable_local();
2459 handled = intel_pmu_drain_bts_buffer();
2460 handled += intel_bts_interrupt();
2462 status = intel_pmu_get_status();
2466 intel_pmu_lbr_read();
2467 if (++loops > 100) {
2471 WARN(1, "perfevents: irq loop stuck!\n");
2472 perf_event_print_debug();
2480 handled += handle_pmi_common(regs, status);
2482 /* Ack the PMI in the APIC */
2483 apic_write(APIC_LVTPC, APIC_DM_NMI);
2486 * The counters start counting immediately while we ack the status.
2487 * Make it as close as possible to IRET. This avoids bogus
2488 * freezing on Skylake CPUs.
2491 intel_pmu_ack_status(status);
2494 * The CPU may issue two PMIs very close to each other.
2495 * When the PMI handler services the first one, the
2496 * GLOBAL_STATUS is already updated to reflect both.
2497 * When it IRETs, the second PMI is immediately
2498 * handled and it sees a clear status. In the meantime,
2499 * there may be a third PMI, because the freezing bit
2500 * isn't set since the ack in the first PMI handler.
2501 * Double check if there is more work to be done.
2503 status = intel_pmu_get_status();
2509 intel_bts_enable_local();
2510 cpuc->enabled = pmu_enabled;
2515 * This handler is triggered by the local APIC, so the APIC IRQ handling
2518 static int intel_pmu_handle_irq(struct pt_regs *regs)
2520 struct cpu_hw_events *cpuc;
2526 cpuc = this_cpu_ptr(&cpu_hw_events);
2529 * Save the PMU state.
2530 * It needs to be restored when leaving the handler.
2532 pmu_enabled = cpuc->enabled;
2534 * No known reason to not always do late ACK,
2535 * but just in case do it opt-in.
2537 if (!x86_pmu.late_ack)
2538 apic_write(APIC_LVTPC, APIC_DM_NMI);
2539 intel_bts_disable_local();
2541 __intel_pmu_disable_all();
2542 handled = intel_pmu_drain_bts_buffer();
2543 handled += intel_bts_interrupt();
2544 status = intel_pmu_get_status();
2550 intel_pmu_lbr_read();
2551 intel_pmu_ack_status(status);
2552 if (++loops > 100) {
2556 WARN(1, "perfevents: irq loop stuck!\n");
2557 perf_event_print_debug();
2564 handled += handle_pmi_common(regs, status);
2567 * Repeat if there is more work to be done:
2569 status = intel_pmu_get_status();
2574 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2575 cpuc->enabled = pmu_enabled;
2577 __intel_pmu_enable_all(0, true);
2578 intel_bts_enable_local();
2581 * Only unmask the NMI after the overflow counters
2582 * have been reset. This avoids spurious NMIs on
2585 if (x86_pmu.late_ack)
2586 apic_write(APIC_LVTPC, APIC_DM_NMI);
2590 static struct event_constraint *
2591 intel_bts_constraints(struct perf_event *event)
2593 if (unlikely(intel_pmu_has_bts(event)))
2594 return &bts_constraint;
2599 static int intel_alt_er(int idx, u64 config)
2603 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
2606 if (idx == EXTRA_REG_RSP_0)
2607 alt_idx = EXTRA_REG_RSP_1;
2609 if (idx == EXTRA_REG_RSP_1)
2610 alt_idx = EXTRA_REG_RSP_0;
2612 if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
2618 static void intel_fixup_er(struct perf_event *event, int idx)
2620 event->hw.extra_reg.idx = idx;
2622 if (idx == EXTRA_REG_RSP_0) {
2623 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2624 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
2625 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
2626 } else if (idx == EXTRA_REG_RSP_1) {
2627 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2628 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
2629 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
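/*
 * Example: if two events both request MSR_OFFCORE_RSP_0 with different
 * config1 values, the second one is steered to EXTRA_REG_RSP_1 by
 * intel_alt_er() and rewritten here to use the OFFCORE_RSP_1 event
 * encoding and MSR_OFFCORE_RSP_1, so both can be scheduled at once.
 */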
2634 * manage allocation of shared extra msr for certain events
2637 * per-cpu: to be shared between the various events on a single PMU
2638 * per-core: per-cpu + shared by HT threads
2640 static struct event_constraint *
2641 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
2642 struct perf_event *event,
2643 struct hw_perf_event_extra *reg)
2645 struct event_constraint *c = &emptyconstraint;
2646 struct er_account *era;
2647 unsigned long flags;
2651 * reg->alloc can be set due to existing state, so for fake cpuc we
2652 * need to ignore this, otherwise we might fail to allocate proper fake
2653 * state for this extra reg constraint. Also see the comment below.
2655 if (reg->alloc && !cpuc->is_fake)
2656 return NULL; /* call x86_get_event_constraint() */
2659 era = &cpuc->shared_regs->regs[idx];
2661 * we use raw_spin_lock_irqsave() to avoid lockdep issues when
2662 * passing a fake cpuc
2664 raw_spin_lock_irqsave(&era->lock, flags);
2666 if (!atomic_read(&era->ref) || era->config == reg->config) {
2669 * If its a fake cpuc -- as per validate_{group,event}() we
2670 * shouldn't touch event state and we can avoid doing so
2671 * since both will only call get_event_constraints() once
2672 * on each event, this avoids the need for reg->alloc.
2674 * Not doing the ER fixup will only result in era->reg being
2675 * wrong, but since we won't actually try and program hardware
2676 * this isn't a problem either.
2678 if (!cpuc->is_fake) {
2679 if (idx != reg->idx)
2680 intel_fixup_er(event, idx);
2683 * x86_schedule_events() can call get_event_constraints()
2684 * multiple times on events in the case of incremental
2685 * scheduling(). reg->alloc ensures we only do the ER
2691 /* lock in msr value */
2692 era->config = reg->config;
2693 era->reg = reg->reg;
2696 atomic_inc(&era->ref);
2699 * need to call x86_get_event_constraint()
2700 * to check if associated event has constraints
2704 idx = intel_alt_er(idx, reg->config);
2705 if (idx != reg->idx) {
2706 raw_spin_unlock_irqrestore(&era->lock, flags);
2710 raw_spin_unlock_irqrestore(&era->lock, flags);
2716 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
2717 struct hw_perf_event_extra *reg)
2719 struct er_account *era;
2722 * Only put constraint if extra reg was actually allocated. Also takes
2723 * care of events which do not use an extra shared reg.
2725 * Also, if this is a fake cpuc we shouldn't touch any event state
2726 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
2727 * either since it'll be thrown out.
2729 if (!reg->alloc || cpuc->is_fake)
2732 era = &cpuc->shared_regs->regs[reg->idx];
2734 /* one fewer user */
2735 atomic_dec(&era->ref);
2737 /* allocate again next time */
2741 static struct event_constraint *
2742 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
2743 struct perf_event *event)
2745 struct event_constraint *c = NULL, *d;
2746 struct hw_perf_event_extra *xreg, *breg;
2748 xreg = &event->hw.extra_reg;
2749 if (xreg->idx != EXTRA_REG_NONE) {
2750 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
2751 if (c == &emptyconstraint)
2754 breg = &event->hw.branch_reg;
2755 if (breg->idx != EXTRA_REG_NONE) {
2756 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
2757 if (d == &emptyconstraint) {
2758 __intel_shared_reg_put_constraints(cpuc, xreg);
2765 struct event_constraint *
2766 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2767 struct perf_event *event)
2769 struct event_constraint *c;
2771 if (x86_pmu.event_constraints) {
2772 for_each_event_constraint(c, x86_pmu.event_constraints) {
2773 if (constraint_match(c, event->hw.config)) {
2774 event->hw.flags |= c->flags;
2780 return &unconstrained;
2783 static struct event_constraint *
2784 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2785 struct perf_event *event)
2787 struct event_constraint *c;
2789 c = intel_bts_constraints(event);
2793 c = intel_shared_regs_constraints(cpuc, event);
2797 c = intel_pebs_constraints(event);
2801 return x86_get_event_constraints(cpuc, idx, event);
2805 intel_start_scheduling(struct cpu_hw_events *cpuc)
2807 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2808 struct intel_excl_states *xl;
2809 int tid = cpuc->excl_thread_id;
2812 * nothing needed if in group validation mode
2814 if (cpuc->is_fake || !is_ht_workaround_enabled())
2818 * no exclusion needed
2820 if (WARN_ON_ONCE(!excl_cntrs))
2823 xl = &excl_cntrs->states[tid];
2825 xl->sched_started = true;
2827 * lock shared state until we are done scheduling
2828 * in stop_event_scheduling()
2829 * makes scheduling appear as a transaction
2831 raw_spin_lock(&excl_cntrs->lock);
2834 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2836 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2837 struct event_constraint *c = cpuc->event_constraint[idx];
2838 struct intel_excl_states *xl;
2839 int tid = cpuc->excl_thread_id;
2841 if (cpuc->is_fake || !is_ht_workaround_enabled())
2844 if (WARN_ON_ONCE(!excl_cntrs))
2847 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
2850 xl = &excl_cntrs->states[tid];
2852 lockdep_assert_held(&excl_cntrs->lock);
2854 if (c->flags & PERF_X86_EVENT_EXCL)
2855 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
2857 xl->state[cntr] = INTEL_EXCL_SHARED;
2861 intel_stop_scheduling(struct cpu_hw_events *cpuc)
2863 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2864 struct intel_excl_states *xl;
2865 int tid = cpuc->excl_thread_id;
2868 * nothing needed if in group validation mode
2870 if (cpuc->is_fake || !is_ht_workaround_enabled())
2873 * no exclusion needed
2875 if (WARN_ON_ONCE(!excl_cntrs))
2878 xl = &excl_cntrs->states[tid];
2880 xl->sched_started = false;
2882 * release shared state lock (acquired in intel_start_scheduling())
2884 raw_spin_unlock(&excl_cntrs->lock);
2887 static struct event_constraint *
2888 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
2890 WARN_ON_ONCE(!cpuc->constraint_list);
2892 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
2893 struct event_constraint *cx;
2896 * grab pre-allocated constraint entry
2898 cx = &cpuc->constraint_list[idx];
2901 * initialize dynamic constraint
2902 * with static constraint
2907 * mark constraint as dynamic
2909 cx->flags |= PERF_X86_EVENT_DYNAMIC;
2916 static struct event_constraint *
2917 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
2918 int idx, struct event_constraint *c)
2920 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2921 struct intel_excl_states *xlo;
2922 int tid = cpuc->excl_thread_id;
2926 * validating a group does not require
2927 * enforcing cross-thread exclusion
2929 if (cpuc->is_fake || !is_ht_workaround_enabled())
2933 * no exclusion needed
2935 if (WARN_ON_ONCE(!excl_cntrs))
2939 * because we modify the constraint, we need
2940 * to make a copy. Static constraints come
2941 * from static const tables.
2943 * only needed when constraint has not yet
2944 * been cloned (marked dynamic)
2946 c = dyn_constraint(cpuc, c, idx);
2949 * From here on, the constraint is dynamic.
2950 * Either it was just allocated above, or it
2951 * was allocated during an earlier invocation
2956 * state of sibling HT
2958 xlo = &excl_cntrs->states[tid ^ 1];
2961 * event requires exclusive counter access
2964 is_excl = c->flags & PERF_X86_EVENT_EXCL;
2965 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
2966 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
2967 if (!cpuc->n_excl++)
2968 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
2972 * Modify static constraint with current dynamic
2975 * EXCLUSIVE: sibling counter measuring exclusive event
2976 * SHARED : sibling counter measuring non-exclusive event
2977 * UNUSED : sibling counter unused
2980 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
2982 * exclusive event in sibling counter
2983 * our corresponding counter cannot be used
2984 * regardless of our event
2986 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
2987 __clear_bit(i, c->idxmsk);
2992 * if measuring an exclusive event, sibling
2993 * measuring non-exclusive, then counter cannot
2996 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
2997 __clear_bit(i, c->idxmsk);
3004 * if we return an empty mask, then switch
3005 * back to static empty constraint to avoid
3006 * the cost of freeing later on
3009 c = &emptyconstraint;
3016 static struct event_constraint *
3017 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3018 struct perf_event *event)
3020 struct event_constraint *c1, *c2;
3022 c1 = cpuc->event_constraint[idx];
3026 * - static constraint: no change across incremental scheduling calls
3027 * - dynamic constraint: handled by intel_get_excl_constraints()
3029 c2 = __intel_get_event_constraints(cpuc, idx, event);
3031 WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3032 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3033 c1->weight = c2->weight;
3037 if (cpuc->excl_cntrs)
3038 return intel_get_excl_constraints(cpuc, event, idx, c2);
3043 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3044 struct perf_event *event)
3046 struct hw_perf_event *hwc = &event->hw;
3047 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3048 int tid = cpuc->excl_thread_id;
3049 struct intel_excl_states *xl;
3052 * nothing needed if in group validation mode
3057 if (WARN_ON_ONCE(!excl_cntrs))
3060 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
3061 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
3062 if (!--cpuc->n_excl)
3063 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
3067 * If event was actually assigned, then mark the counter state as
3070 if (hwc->idx >= 0) {
3071 xl = &excl_cntrs->states[tid];
3074 * put_constraint may be called from x86_schedule_events()
3075 * which already has the lock held so here make locking
3078 if (!xl->sched_started)
3079 raw_spin_lock(&excl_cntrs->lock);
3081 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3083 if (!xl->sched_started)
3084 raw_spin_unlock(&excl_cntrs->lock);
3089 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3090 struct perf_event *event)
3092 struct hw_perf_event_extra *reg;
3094 reg = &event->hw.extra_reg;
3095 if (reg->idx != EXTRA_REG_NONE)
3096 __intel_shared_reg_put_constraints(cpuc, reg);
3098 reg = &event->hw.branch_reg;
3099 if (reg->idx != EXTRA_REG_NONE)
3100 __intel_shared_reg_put_constraints(cpuc, reg);
3103 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3104 struct perf_event *event)
3106 intel_put_shared_regs_event_constraints(cpuc, event);
3109 * If the PMU has exclusive counter restrictions, then
3110 * all events are subject to them and must call the
3111 * put_excl_constraints() routine
3113 if (cpuc->excl_cntrs)
3114 intel_put_excl_constraints(cpuc, event);
3117 static void intel_pebs_aliases_core2(struct perf_event *event)
3119 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3121 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3122 * (0x003c) so that we can use it with PEBS.
3124 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3125 * PEBS capable. However we can use INST_RETIRED.ANY_P
3126 * (0x00c0), which is a PEBS capable event, to get the same
3129 * INST_RETIRED.ANY_P counts the number of cycles that retire
3130 * CNTMASK instructions. By setting CNTMASK to a value (16)
3131 * larger than the maximum number of instructions that can be
3132 * retired per cycle (4) and then inverting the condition, we
3133 * count all cycles that retire 16 or fewer instructions, which
3136 * Thereby we gain a PEBS capable cycle counter.
3138 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3140 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3141 event->hw.config = alt_config;
3145 static void intel_pebs_aliases_snb(struct perf_event *event)
3147 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3149 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3150 * (0x003c) so that we can use it with PEBS.
3152 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3153 * PEBS capable. However we can use UOPS_RETIRED.ALL
3154 * (0x01c2), which is a PEBS capable event, to get the same
3157 * UOPS_RETIRED.ALL counts the number of cycles that retire
3158 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
3159 * larger than the maximum number of micro-ops that can be
3160 * retired per cycle (4) and then inverting the condition, we
3161 * count all cycles that retire 16 or fewer micro-ops, which
3164 * Thereby we gain a PEBS capable cycle counter.
3166 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3168 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3169 event->hw.config = alt_config;
3173 static void intel_pebs_aliases_precdist(struct perf_event *event)
3175 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3177 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3178 * (0x003c) so that we can use it with PEBS.
3180 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3181 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3182 * (0x01c0), which is a PEBS capable event, to get the same
3185 * The PREC_DIST event has special support to minimize sample
3186 * shadowing effects. One drawback is that it can be
3187 * only programmed on counter 1, but that seems like an
3188 * acceptable trade off.
3190 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3192 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3193 event->hw.config = alt_config;
3197 static void intel_pebs_aliases_ivb(struct perf_event *event)
3199 if (event->attr.precise_ip < 3)
3200 return intel_pebs_aliases_snb(event);
3201 return intel_pebs_aliases_precdist(event);
3204 static void intel_pebs_aliases_skl(struct perf_event *event)
3206 if (event->attr.precise_ip < 3)
3207 return intel_pebs_aliases_core2(event);
3208 return intel_pebs_aliases_precdist(event);
3211 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3213 unsigned long flags = x86_pmu.large_pebs_flags;
3215 if (event->attr.use_clockid)
3216 flags &= ~PERF_SAMPLE_TIME;
3217 if (!event->attr.exclude_kernel)
3218 flags &= ~PERF_SAMPLE_REGS_USER;
3219 if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
3220 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
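/*
 * For instance, an event using attr.use_clockid samples time from a
 * POSIX clock that the hardware PEBS record cannot provide, so
 * PERF_SAMPLE_TIME is dropped from the large-PEBS capable set and such
 * an event falls back to single-record PEBS.
 */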
3224 static int intel_pmu_bts_config(struct perf_event *event)
3226 struct perf_event_attr *attr = &event->attr;
3228 if (unlikely(intel_pmu_has_bts(event))) {
3229 /* BTS is not supported by this architecture. */
3230 if (!x86_pmu.bts_active)
3233 /* BTS is currently only allowed for user-mode. */
3234 if (!attr->exclude_kernel)
3237 /* BTS is not allowed for precise events. */
3238 if (attr->precise_ip)
3241 /* disallow bts if conflicting events are present */
3242 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3245 event->destroy = hw_perf_lbr_event_destroy;
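/*
 * As an illustration, BTS is picked for a plain branch-instructions
 * event with sample_period == 1, e.g. "perf record -e branches:u -c 1",
 * which satisfies the user-only and non-precise checks above.
 */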
3251 static int core_pmu_hw_config(struct perf_event *event)
3253 int ret = x86_pmu_hw_config(event);
3258 return intel_pmu_bts_config(event);
3261 static int intel_pmu_hw_config(struct perf_event *event)
3263 int ret = x86_pmu_hw_config(event);
3268 ret = intel_pmu_bts_config(event);
3272 if (event->attr.precise_ip) {
3273 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
3274 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3275 if (!(event->attr.sample_type &
3276 ~intel_pmu_large_pebs_flags(event)))
3277 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3279 if (x86_pmu.pebs_aliases)
3280 x86_pmu.pebs_aliases(event);
3282 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3283 event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3286 if (needs_branch_stack(event)) {
3287 ret = intel_pmu_setup_lbr_filter(event);
3292 * BTS is set up earlier in this path, so don't account twice
3294 if (!unlikely(intel_pmu_has_bts(event))) {
3295 /* disallow lbr if conflicting events are present */
3296 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3299 event->destroy = hw_perf_lbr_event_destroy;
3303 if (event->attr.type != PERF_TYPE_RAW)
3306 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
3309 if (x86_pmu.version < 3)
3312 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3315 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
3320 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
3322 if (x86_pmu.guest_get_msrs)
3323 return x86_pmu.guest_get_msrs(nr);
3327 EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
3329 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
3331 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3332 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3334 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
3335 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
3336 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
3337 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
3338 arr[0].guest &= ~cpuc->pebs_enabled;
3340 arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
3343 if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
3345 * If a PMU counter has PEBS enabled, it is not enough to
3346 * disable the counter on a guest entry since a PEBS memory
3347 * write can overshoot guest entry and corrupt guest
3348 * memory. Disabling PEBS solves the problem.
3350 * Don't do this if the CPU already enforces it.
3352 arr[1].msr = MSR_IA32_PEBS_ENABLE;
3353 arr[1].host = cpuc->pebs_enabled;
3361 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
3363 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3364 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3367 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3368 struct perf_event *event = cpuc->events[idx];
3370 arr[idx].msr = x86_pmu_config_addr(idx);
3371 arr[idx].host = arr[idx].guest = 0;
3373 if (!test_bit(idx, cpuc->active_mask))
3376 arr[idx].host = arr[idx].guest =
3377 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
3379 if (event->attr.exclude_host)
3380 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3381 else if (event->attr.exclude_guest)
3382 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3385 *nr = x86_pmu.num_counters;
3389 static void core_pmu_enable_event(struct perf_event *event)
3391 if (!event->attr.exclude_host)
3392 x86_pmu_enable_event(event);
3395 static void core_pmu_enable_all(int added)
3397 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3400 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3401 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
3403 if (!test_bit(idx, cpuc->active_mask) ||
3404 cpuc->events[idx]->attr.exclude_host)
3407 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
3411 static int hsw_hw_config(struct perf_event *event)
3413 int ret = intel_pmu_hw_config(event);
3417 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
3419 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
3422 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
3423 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
3426 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
3427 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
3428 event->attr.precise_ip > 0))
3431 if (event_is_checkpointed(event)) {
3433 * Sampling of checkpointed events can cause situations where
3434 * the CPU constantly aborts because of an overflow, which is
3435 * then checkpointed back and ignored. Forbid checkpointing
3438 * But still allow a long sampling period, so that perf stat
3441 if (event->attr.sample_period > 0 &&
3442 event->attr.sample_period < 0x7fffffff)
3448 static struct event_constraint counter0_constraint =
3449 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
3451 static struct event_constraint counter2_constraint =
3452 EVENT_CONSTRAINT(0, 0x4, 0);
3454 static struct event_constraint fixed0_constraint =
3455 FIXED_EVENT_CONSTRAINT(0x00c0, 0);
3457 static struct event_constraint fixed0_counter0_constraint =
3458 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
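/*
 * Note: the 0x100000001ULL mask selects two counters: bit 0 is general
 * purpose counter 0 and bit 32 (i.e. INTEL_PMC_IDX_FIXED) is fixed
 * counter 0, matching the reduced-skid PEBS placement used below.
 */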
3460 static struct event_constraint *
3461 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3462 struct perf_event *event)
3464 struct event_constraint *c;
3466 c = intel_get_event_constraints(cpuc, idx, event);
3468 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
3469 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
3470 if (c->idxmsk64 & (1U << 2))
3471 return &counter2_constraint;
3472 return &emptyconstraint;
3478 static struct event_constraint *
3479 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3480 struct perf_event *event)
3483 * Fixed counter 0 has less skid.
3484 * Force instruction:ppp in Fixed counter 0
3486 if ((event->attr.precise_ip == 3) &&
3487 constraint_match(&fixed0_constraint, event->hw.config))
3488 return &fixed0_constraint;
3490 return hsw_get_event_constraints(cpuc, idx, event);
3493 static struct event_constraint *
3494 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3495 struct perf_event *event)
3497 struct event_constraint *c;
3499 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
3500 if (event->attr.precise_ip == 3)
3501 return &counter0_constraint;
3503 c = intel_get_event_constraints(cpuc, idx, event);
3508 static struct event_constraint *
3509 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3510 struct perf_event *event)
3512 struct event_constraint *c;
3515 * :ppp means to do reduced skid PEBS,
3516 * which is available on PMC0 and fixed counter 0.
3518 if (event->attr.precise_ip == 3) {
3519 /* Force instruction:ppp on PMC0 and Fixed counter 0 */
3520 if (constraint_match(&fixed0_constraint, event->hw.config))
3521 return &fixed0_counter0_constraint;
3523 return &counter0_constraint;
3526 c = intel_get_event_constraints(cpuc, idx, event);
3531 static bool allow_tsx_force_abort = true;
3533 static struct event_constraint *
3534 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3535 struct perf_event *event)
3537 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
3540 * Without TFA we must not use PMC3.
3542 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
3543 c = dyn_constraint(cpuc, c, idx);
3544 c->idxmsk64 &= ~(1ULL << 3);
3554 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
3555 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
3556 * the two to enforce a minimum period of 128 (the smallest value that has bits
3557 * 0-5 cleared and >= 100).
3559 * Because of how the code in x86_perf_event_set_period() works, the truncation
3560 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
3561 * to make up for the 'lost' events due to carrying the 'error' in period_left.
3563 * Therefore the effective (average) period matches the requested period,
3564 * despite coarser hardware granularity.
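 * For example, a requested period of 100 is first raised to 128, and a
 * requested period of 200 is truncated to 192; the dropped remainder is
 * carried in period_left and paid back on later reloads.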
3566 static u64 bdw_limit_period(struct perf_event *event, u64 left)
3568 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
3569 X86_CONFIG(.event=0xc0, .umask=0x01)) {
3577 PMU_FORMAT_ATTR(event, "config:0-7" );
3578 PMU_FORMAT_ATTR(umask, "config:8-15" );
3579 PMU_FORMAT_ATTR(edge, "config:18" );
3580 PMU_FORMAT_ATTR(pc, "config:19" );
3581 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
3582 PMU_FORMAT_ATTR(inv, "config:23" );
3583 PMU_FORMAT_ATTR(cmask, "config:24-31" );
3584 PMU_FORMAT_ATTR(in_tx, "config:32");
3585 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
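/*
 * These format strings describe how sysfs event syntax maps onto the raw
 * config word. As an illustration, "cpu/event=0xc0,umask=0x01,cmask=16,inv=1/"
 * from the perf tool fills config bits 0-7, 8-15, 24-31 and 23 respectively.
 */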
3587 static struct attribute *intel_arch_formats_attr[] = {
3588 &format_attr_event.attr,
3589 &format_attr_umask.attr,
3590 &format_attr_edge.attr,
3591 &format_attr_pc.attr,
3592 &format_attr_inv.attr,
3593 &format_attr_cmask.attr,
3597 ssize_t intel_event_sysfs_show(char *page, u64 config)
3599 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
3601 return x86_event_sysfs_show(page, config, event);
3604 static struct intel_shared_regs *allocate_shared_regs(int cpu)
3606 struct intel_shared_regs *regs;
3609 regs = kzalloc_node(sizeof(struct intel_shared_regs),
3610 GFP_KERNEL, cpu_to_node(cpu));
3613 * initialize the locks to keep lockdep happy
3615 for (i = 0; i < EXTRA_REG_MAX; i++)
3616 raw_spin_lock_init(&regs->regs[i].lock);
3623 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
3625 struct intel_excl_cntrs *c;
3627 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
3628 GFP_KERNEL, cpu_to_node(cpu));
3630 raw_spin_lock_init(&c->lock);
3637 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
3639 cpuc->pebs_record_size = x86_pmu.pebs_record_size;
3641 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
3642 cpuc->shared_regs = allocate_shared_regs(cpu);
3643 if (!cpuc->shared_regs)
3647 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
3648 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
3650 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
3651 if (!cpuc->constraint_list)
3652 goto err_shared_regs;
3655 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3656 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
3657 if (!cpuc->excl_cntrs)
3658 goto err_constraint_list;
3660 cpuc->excl_thread_id = 0;
3665 err_constraint_list:
3666 kfree(cpuc->constraint_list);
3667 cpuc->constraint_list = NULL;
3670 kfree(cpuc->shared_regs);
3671 cpuc->shared_regs = NULL;
3677 static int intel_pmu_cpu_prepare(int cpu)
3679 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
3682 static void flip_smm_bit(void *data)
3684 unsigned long set = *(unsigned long *)data;
3687 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
3688 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3690 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
3691 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3695 static void intel_pmu_cpu_starting(int cpu)
3697 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3698 int core_id = topology_core_id(cpu);
3701 init_debug_store_on_cpu(cpu);
3703 * Deal with CPUs that don't clear their LBRs on power-up.
3705 intel_pmu_lbr_reset();
3707 cpuc->lbr_sel = NULL;
3709 if (x86_pmu.flags & PMU_FL_TFA) {
3710 WARN_ON_ONCE(cpuc->tfa_shadow);
3711 cpuc->tfa_shadow = ~0ULL;
3712 intel_set_tfa(cpuc, false);
3715 if (x86_pmu.version > 1)
3716 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
3718 if (x86_pmu.counter_freezing)
3719 enable_counter_freeze();
3721 if (!cpuc->shared_regs)
3724 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
3725 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3726 struct intel_shared_regs *pc;
3728 pc = per_cpu(cpu_hw_events, i).shared_regs;
3729 if (pc && pc->core_id == core_id) {
3730 cpuc->kfree_on_online[0] = cpuc->shared_regs;
3731 cpuc->shared_regs = pc;
3735 cpuc->shared_regs->core_id = core_id;
3736 cpuc->shared_regs->refcnt++;
3739 if (x86_pmu.lbr_sel_map)
3740 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
3742 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3743 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3744 struct cpu_hw_events *sibling;
3745 struct intel_excl_cntrs *c;
3747 sibling = &per_cpu(cpu_hw_events, i);
3748 c = sibling->excl_cntrs;
3749 if (c && c->core_id == core_id) {
3750 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
3751 cpuc->excl_cntrs = c;
3752 if (!sibling->excl_thread_id)
3753 cpuc->excl_thread_id = 1;
3757 cpuc->excl_cntrs->core_id = core_id;
3758 cpuc->excl_cntrs->refcnt++;
3762 static void free_excl_cntrs(struct cpu_hw_events *cpuc)
3764 struct intel_excl_cntrs *c;
3766 c = cpuc->excl_cntrs;
3768 if (c->core_id == -1 || --c->refcnt == 0)
3770 cpuc->excl_cntrs = NULL;
3773 kfree(cpuc->constraint_list);
3774 cpuc->constraint_list = NULL;
3777 static void intel_pmu_cpu_dying(int cpu)
3779 fini_debug_store_on_cpu(cpu);
3781 if (x86_pmu.counter_freezing)
3782 disable_counter_freeze();
3785 void intel_cpuc_finish(struct cpu_hw_events *cpuc)
3787 struct intel_shared_regs *pc;
3789 pc = cpuc->shared_regs;
3791 if (pc->core_id == -1 || --pc->refcnt == 0)
3793 cpuc->shared_regs = NULL;
3796 free_excl_cntrs(cpuc);
3799 static void intel_pmu_cpu_dead(int cpu)
3801 intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
3804 static void intel_pmu_sched_task(struct perf_event_context *ctx,
3807 intel_pmu_pebs_sched_task(ctx, sched_in);
3808 intel_pmu_lbr_sched_task(ctx, sched_in);
3811 static int intel_pmu_check_period(struct perf_event *event, u64 value)
3813 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
3816 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
3818 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
3820 PMU_FORMAT_ATTR(frontend, "config1:0-23");
3822 static struct attribute *intel_arch3_formats_attr[] = {
3823 &format_attr_event.attr,
3824 &format_attr_umask.attr,
3825 &format_attr_edge.attr,
3826 &format_attr_pc.attr,
3827 &format_attr_any.attr,
3828 &format_attr_inv.attr,
3829 &format_attr_cmask.attr,
3833 static struct attribute *hsw_format_attr[] = {
3834 &format_attr_in_tx.attr,
3835 &format_attr_in_tx_cp.attr,
3836 &format_attr_offcore_rsp.attr,
3837 &format_attr_ldlat.attr,
3841 static struct attribute *nhm_format_attr[] = {
3842 &format_attr_offcore_rsp.attr,
3843 &format_attr_ldlat.attr,
3847 static struct attribute *slm_format_attr[] = {
3848 &format_attr_offcore_rsp.attr,
3852 static struct attribute *skl_format_attr[] = {
3853 &format_attr_frontend.attr,
3857 static __initconst const struct x86_pmu core_pmu = {
3859 .handle_irq = x86_pmu_handle_irq,
3860 .disable_all = x86_pmu_disable_all,
3861 .enable_all = core_pmu_enable_all,
3862 .enable = core_pmu_enable_event,
3863 .disable = x86_pmu_disable_event,
3864 .hw_config = core_pmu_hw_config,
3865 .schedule_events = x86_schedule_events,
3866 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
3867 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
3868 .event_map = intel_pmu_event_map,
3869 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
3871 .large_pebs_flags = LARGE_PEBS_FLAGS,
3874 * Intel PMCs cannot be accessed sanely above 32-bit width,
3875 * so we install an artificial 1<<31 period regardless of
3876 * the generic event period:
3878 .max_period = (1ULL<<31) - 1,
3879 .get_event_constraints = intel_get_event_constraints,
3880 .put_event_constraints = intel_put_event_constraints,
3881 .event_constraints = intel_core_event_constraints,
3882 .guest_get_msrs = core_guest_get_msrs,
3883 .format_attrs = intel_arch_formats_attr,
3884 .events_sysfs_show = intel_event_sysfs_show,
3887 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
3888 * together with PMU version 1 and thus be using core_pmu with
3889 * shared_regs. We need the following callbacks here to allocate
3892 .cpu_prepare = intel_pmu_cpu_prepare,
3893 .cpu_starting = intel_pmu_cpu_starting,
3894 .cpu_dying = intel_pmu_cpu_dying,
3895 .cpu_dead = intel_pmu_cpu_dead,
3897 .check_period = intel_pmu_check_period,
3900 static struct attribute *intel_pmu_attrs[];
3902 static __initconst const struct x86_pmu intel_pmu = {
3904 .handle_irq = intel_pmu_handle_irq,
3905 .disable_all = intel_pmu_disable_all,
3906 .enable_all = intel_pmu_enable_all,
3907 .enable = intel_pmu_enable_event,
3908 .disable = intel_pmu_disable_event,
3909 .add = intel_pmu_add_event,
3910 .del = intel_pmu_del_event,
3911 .read = intel_pmu_read_event,
3912 .hw_config = intel_pmu_hw_config,
3913 .schedule_events = x86_schedule_events,
3914 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
3915 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
3916 .event_map = intel_pmu_event_map,
3917 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
3919 .large_pebs_flags = LARGE_PEBS_FLAGS,
3921 * Intel PMCs cannot be accessed sanely above 32 bit width,
3922 * so we install an artificial 1<<31 period regardless of
3923 * the generic event period:
3925 .max_period = (1ULL << 31) - 1,
3926 .get_event_constraints = intel_get_event_constraints,
3927 .put_event_constraints = intel_put_event_constraints,
3928 .pebs_aliases = intel_pebs_aliases_core2,
3930 .format_attrs = intel_arch3_formats_attr,
3931 .events_sysfs_show = intel_event_sysfs_show,
3933 .attrs = intel_pmu_attrs,
3935 .cpu_prepare = intel_pmu_cpu_prepare,
3936 .cpu_starting = intel_pmu_cpu_starting,
3937 .cpu_dying = intel_pmu_cpu_dying,
3938 .cpu_dead = intel_pmu_cpu_dead,
3940 .guest_get_msrs = intel_guest_get_msrs,
3941 .sched_task = intel_pmu_sched_task,
3943 .check_period = intel_pmu_check_period,
3946 static __init void intel_clovertown_quirk(void)
3949 * PEBS is unreliable due to:
3951 * AJ67 - PEBS may experience CPL leaks
3952 * AJ68 - PEBS PMI may be delayed by one event
3953 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
3954 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
3956 * AJ67 could be worked around by restricting the OS/USR flags.
3957 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
3959 * AJ106 could possibly be worked around by not allowing LBR
3960 * usage from PEBS, including the fixup.
3961 * AJ68 could possibly be worked around by always programming
3962 * a pebs_event_reset[0] value and coping with the lost events.
3964 * But taken together it might just make sense to not enable PEBS on
3967 pr_warn("PEBS disabled due to CPU errata\n");
3969 x86_pmu.pebs_constraints = NULL;
3972 static const struct x86_cpu_desc isolation_ucodes[] = {
3973 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_CORE, 3, 0x0000001f),
3974 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_ULT, 1, 0x0000001e),
3975 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_GT3E, 1, 0x00000015),
3976 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037),
3977 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
3978 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_CORE, 4, 0x00000023),
3979 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_GT3E, 1, 0x00000014),
3980 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 2, 0x00000010),
3981 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 3, 0x07000009),
3982 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 4, 0x0f000009),
3983 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 5, 0x0e000002),
3984 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
3985 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
3986 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
3987 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_MOBILE, 3, 0x0000007c),
3988 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_DESKTOP, 3, 0x0000007c),
3989 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 9, 0x0000004e),
3990 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 9, 0x0000004e),
3991 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 10, 0x0000004e),
3992 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 11, 0x0000004e),
3993 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 12, 0x0000004e),
3994 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 10, 0x0000004e),
3995 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 11, 0x0000004e),
3996 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 12, 0x0000004e),
3997 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 13, 0x0000004e),
4001 static void intel_check_pebs_isolation(void)
4003 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
4006 static __init void intel_pebs_isolation_quirk(void)
4008 WARN_ON_ONCE(x86_pmu.check_microcode);
4009 x86_pmu.check_microcode = intel_check_pebs_isolation;
4010 intel_check_pebs_isolation();
4013 static const struct x86_cpu_desc pebs_ucodes[] = {
4014 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028),
4015 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618),
4016 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c),
4020 static bool intel_snb_pebs_broken(void)
4022 return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
4025 static void intel_snb_check_microcode(void)
4027 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
4031 * Serialized by the microcode lock.
4033 if (x86_pmu.pebs_broken) {
4034 pr_info("PEBS enabled due to microcode update\n");
4035 x86_pmu.pebs_broken = 0;
4037 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
4038 x86_pmu.pebs_broken = 1;
4042 static bool is_lbr_from(unsigned long msr)
4044 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
4046 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
4050 * Under certain circumstances, accessing certain MSRs may cause #GP.
4051 * This function tests whether the input MSR can be safely accessed.
4053 static bool check_msr(unsigned long msr, u64 mask)
4055 u64 val_old, val_new, val_tmp;
4058 * Read the current value, change it and read it back to see if it
4059 * matches, this is needed to detect certain hardware emulators
4060 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
4062 if (rdmsrl_safe(msr, &val_old))
4066 * Only change the bits which can be updated by wrmsrl.
4068 val_tmp = val_old ^ mask;
4070 if (is_lbr_from(msr))
4071 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
4073 if (wrmsrl_safe(msr, val_tmp) ||
4074 rdmsrl_safe(msr, &val_new))
4078 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
4079 * should equal rdmsrl()'s even with the quirk.
4081 if (val_new != val_tmp)
4084 if (is_lbr_from(msr))
4085 val_old = lbr_from_signext_quirk_wr(val_old);
4087 /* At this point the MSR is known to be safely accessible.
4088 * Restore the old value and return.
4090 wrmsrl(msr, val_old);
4095 static __init void intel_sandybridge_quirk(void)
4097 x86_pmu.check_microcode = intel_snb_check_microcode;
4099 intel_snb_check_microcode();
4103 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
4104 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
4105 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
4106 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
4107 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
4108 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
4109 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
4110 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
4113 static __init void intel_arch_events_quirk(void)
4117 /* disable events that are reported as not present by cpuid */
4118 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
4119 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
4120 pr_warn("CPUID marked event: \'%s\' unavailable\n",
4121 intel_arch_events_map[bit].name);
4125 static __init void intel_nehalem_quirk(void)
4127 union cpuid10_ebx ebx;
4129 ebx.full = x86_pmu.events_maskl;
4130 if (ebx.split.no_branch_misses_retired) {
4132 * Erratum AAJ80 detected, we work it around by using
4133 * the BR_MISP_EXEC.ANY event. This will over-count
4134 * branch-misses, but it's still much better than the
4135 * architectural event which is often completely bogus:
4137 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
4138 ebx.split.no_branch_misses_retired = 0;
4139 x86_pmu.events_maskl = ebx.full;
4140 pr_info("CPU erratum AAJ80 worked around\n");
4144 static const struct x86_cpu_desc counter_freezing_ucodes[] = {
4145 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 2, 0x0000000e),
4146 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 9, 0x0000002e),
4147 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 10, 0x00000008),
4148 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_X, 1, 0x00000028),
4149 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 1, 0x00000028),
4150 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 8, 0x00000006),
4154 static bool intel_counter_freezing_broken(void)
4156 return !x86_cpu_has_min_microcode_rev(counter_freezing_ucodes);
4159 static __init void intel_counter_freezing_quirk(void)
4161 /* Check if it's already disabled */
4162 if (disable_counter_freezing)
4166 * If the system starts with the wrong ucode, leave the
4167 * counter-freezing feature permanently disabled.
4169 if (intel_counter_freezing_broken()) {
4170 pr_info("PMU counter freezing disabled due to CPU errata, "
4171 "please upgrade microcode\n");
4172 x86_pmu.counter_freezing = false;
4173 x86_pmu.handle_irq = intel_pmu_handle_irq;
4178 * enable software workaround for errata:
4183 * Only needed when HT is enabled. However detecting
4184 * if HT is enabled is difficult (model specific). So instead,
4185 * we enable the workaround in the early boot, and verify if
4186 * it is needed in a later initcall phase once we have valid
4187 * topology information to check if HT is actually enabled
4189 static __init void intel_ht_bug(void)
4191 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
4193 x86_pmu.start_scheduling = intel_start_scheduling;
4194 x86_pmu.commit_scheduling = intel_commit_scheduling;
4195 x86_pmu.stop_scheduling = intel_stop_scheduling;
4198 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
4199 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
4201 /* Haswell special events */
4202 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
4203 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
4204 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
4205 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
4206 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
4207 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
4208 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
4209 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
4210 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
4211 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
4212 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
4213 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
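/*
 * The strings above are exposed as named events under the PMU's sysfs
 * events directory, so (for example) "perf stat -e cpu/tx-start/,cpu/cycles-ct/"
 * resolves to event=0xc9,umask=0x1 and event=0x3c,in_tx=1,in_tx_cp=1.
 */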
4215 static struct attribute *hsw_events_attrs[] = {
4216 EVENT_PTR(td_slots_issued),
4217 EVENT_PTR(td_slots_retired),
4218 EVENT_PTR(td_fetch_bubbles),
4219 EVENT_PTR(td_total_slots),
4220 EVENT_PTR(td_total_slots_scale),
4221 EVENT_PTR(td_recovery_bubbles),
4222 EVENT_PTR(td_recovery_bubbles_scale),
4226 static struct attribute *hsw_mem_events_attrs[] = {
4227 EVENT_PTR(mem_ld_hsw),
4228 EVENT_PTR(mem_st_hsw),
4232 static struct attribute *hsw_tsx_events_attrs[] = {
4233 EVENT_PTR(tx_start),
4234 EVENT_PTR(tx_commit),
4235 EVENT_PTR(tx_abort),
4236 EVENT_PTR(tx_capacity),
4237 EVENT_PTR(tx_conflict),
4238 EVENT_PTR(el_start),
4239 EVENT_PTR(el_commit),
4240 EVENT_PTR(el_abort),
4241 EVENT_PTR(el_capacity),
4242 EVENT_PTR(el_conflict),
4243 EVENT_PTR(cycles_t),
4244 EVENT_PTR(cycles_ct),
4248 EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80");
4249 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
4250 EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80");
4251 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
4253 static struct attribute *icl_events_attrs[] = {
4254 EVENT_PTR(mem_ld_hsw),
4255 EVENT_PTR(mem_st_hsw),
4259 static struct attribute *icl_tsx_events_attrs[] = {
4260 EVENT_PTR(tx_start),
4261 EVENT_PTR(tx_abort),
4262 EVENT_PTR(tx_commit),
4263 EVENT_PTR(tx_capacity_read),
4264 EVENT_PTR(tx_capacity_write),
4265 EVENT_PTR(tx_conflict),
4266 EVENT_PTR(el_start),
4267 EVENT_PTR(el_abort),
4268 EVENT_PTR(el_commit),
4269 EVENT_PTR(el_capacity_read),
4270 EVENT_PTR(el_capacity_write),
4271 EVENT_PTR(el_conflict),
4272 EVENT_PTR(cycles_t),
4273 EVENT_PTR(cycles_ct),
4277 static __init struct attribute **get_icl_events_attrs(void)
4279 return boot_cpu_has(X86_FEATURE_RTM) ?
4280 merge_attr(icl_events_attrs, icl_tsx_events_attrs) :
4284 static ssize_t freeze_on_smi_show(struct device *cdev,
4285 struct device_attribute *attr,
4288 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
4291 static DEFINE_MUTEX(freeze_on_smi_mutex);
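/*
 * freeze_on_smi is a sysfs knob (typically /sys/devices/cpu/freeze_on_smi).
 * Writing 1 makes flip_smm_bit() (defined earlier in this file) set the
 * FREEZE_IN_SMM bit in MSR_IA32_DEBUGCTLMSR on every CPU, so the counters
 * stop counting while the CPU is in System Management Mode; writing 0
 * clears it again. Illustrative usage:
 *
 *   echo 1 > /sys/devices/cpu/freeze_on_smi
 */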
4293 static ssize_t freeze_on_smi_store(struct device *cdev,
4294 struct device_attribute *attr,
4295 const char *buf, size_t count)
4300 ret = kstrtoul(buf, 0, &val);
4307 mutex_lock(&freeze_on_smi_mutex);
4309 if (x86_pmu.attr_freeze_on_smi == val)
4312 x86_pmu.attr_freeze_on_smi = val;
4315 on_each_cpu(flip_smm_bit, &val, 1);
4318 mutex_unlock(&freeze_on_smi_mutex);
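/*
 * TSX force abort (TFA) handling: on parts with the TSX_FORCE_ABORT
 * erratum, programming PMC3 is only safe if TSX transactions are forced
 * to abort. The allow_tsx_force_abort knob below selects between keeping
 * PMC3 usable (TSX may be aborted) and keeping TSX intact (PMC3 is
 * withheld from the scheduler). When the value changes, any event
 * currently occupying PMC3 must be rescheduled, which is what
 * update_tfa_sched() triggers on each CPU.
 */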
4323 static void update_tfa_sched(void *ignored)
4325 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4328 * Check if PMC3 is used and, if so, force a reschedule of
4329 * all event types in all contexts.
4331 if (test_bit(3, cpuc->active_mask))
4332 perf_pmu_resched(x86_get_pmu());
4335 static ssize_t show_sysctl_tfa(struct device *cdev,
4336 struct device_attribute *attr,
4339 return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
4342 static ssize_t set_sysctl_tfa(struct device *cdev,
4343 struct device_attribute *attr,
4344 const char *buf, size_t count)
4349 ret = kstrtobool(buf, &val);
4354 if (val == allow_tsx_force_abort)
4357 allow_tsx_force_abort = val;
4360 on_each_cpu(update_tfa_sched, NULL, 1);
4367 static DEVICE_ATTR_RW(freeze_on_smi);
4369 static ssize_t branches_show(struct device *cdev,
4370 struct device_attribute *attr,
4373 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
4376 static DEVICE_ATTR_RO(branches);
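/*
 * "branches" reports the LBR stack depth; lbr_attrs is merged into
 * x86_pmu.caps_attrs in intel_pmu_init(), so it typically appears as
 * /sys/devices/cpu/caps/branches alongside pmu_name.
 */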
4378 static struct attribute *lbr_attrs[] = {
4379 &dev_attr_branches.attr,
4383 static char pmu_name_str[30];
4385 static ssize_t pmu_name_show(struct device *cdev,
4386 struct device_attribute *attr,
4389 return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
4392 static DEVICE_ATTR_RO(pmu_name);
4394 static struct attribute *intel_pmu_caps_attrs[] = {
4395 &dev_attr_pmu_name.attr,
4399 static DEVICE_ATTR(allow_tsx_force_abort, 0644,
4403 static struct attribute *intel_pmu_attrs[] = {
4404 &dev_attr_freeze_on_smi.attr,
4405 NULL, /* &dev_attr_allow_tsx_force_abort.attr, set by intel_pmu_init() when TSX_FORCE_ABORT is present */
4409 static __init struct attribute **
4410 get_events_attrs(struct attribute **base,
4411 struct attribute **mem,
4412 struct attribute **tsx)
4414 struct attribute **attrs = base;
4415 struct attribute **old;
4417 if (mem && x86_pmu.pebs)
4418 attrs = merge_attr(attrs, mem);
4420 if (tsx && boot_cpu_has(X86_FEATURE_RTM)) {
4422 attrs = merge_attr(attrs, tsx);
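/*
 * Note: merge_attr() allocates a fresh array on each call and leaves its
 * inputs untouched, so callers must free intermediate results themselves
 * (see the to_free bookkeeping in intel_pmu_init()).
 */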
4430 __init int intel_pmu_init(void)
4432 struct attribute **extra_attr = NULL;
4433 struct attribute **mem_attr = NULL;
4434 struct attribute **tsx_attr = NULL;
4435 struct attribute **to_free = NULL;
4436 union cpuid10_edx edx;
4437 union cpuid10_eax eax;
4438 union cpuid10_ebx ebx;
4439 struct event_constraint *c;
4440 unsigned int unused;
4441 struct extra_reg *er;
4445 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
4446 switch (boot_cpu_data.x86) {
4448 return p6_pmu_init();
4450 return knc_pmu_init();
4452 return p4_pmu_init();
4458 * Check whether the Architectural PerfMon supports
4459 * Branch Misses Retired hw_event or not.
4461 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
4462 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
4465 version = eax.split.version_id;
4469 x86_pmu = intel_pmu;
4471 x86_pmu.version = version;
4472 x86_pmu.num_counters = eax.split.num_counters;
4473 x86_pmu.cntval_bits = eax.split.bit_width;
4474 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
4476 x86_pmu.events_maskl = ebx.full;
4477 x86_pmu.events_mask_len = eax.split.mask_length;
4479 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
4482 * Quirk: v2 perfmon does not report fixed-purpose events, so
4483 * assume at least 3 events, when not running in a hypervisor:
4486 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
4488 x86_pmu.num_counters_fixed =
4489 max((int)edx.split.num_counters_fixed, assume);
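/*
 * i.e. assume is 3 on bare metal and 0 under a hypervisor, so a
 * hypervisor that reports zero fixed counters is believed.
 */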
4493 x86_pmu.counter_freezing = !disable_counter_freezing;
4495 if (boot_cpu_has(X86_FEATURE_PDCM)) {
4498 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
4499 x86_pmu.intel_cap.capabilities = capabilities;
4504 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
4507 * Install the hw-cache-events table:
4509 switch (boot_cpu_data.x86_model) {
4510 case INTEL_FAM6_CORE_YONAH:
4511 pr_cont("Core events, ");
4515 case INTEL_FAM6_CORE2_MEROM:
4516 x86_add_quirk(intel_clovertown_quirk);
4519 case INTEL_FAM6_CORE2_MEROM_L:
4520 case INTEL_FAM6_CORE2_PENRYN:
4521 case INTEL_FAM6_CORE2_DUNNINGTON:
4522 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
4523 sizeof(hw_cache_event_ids));
4525 intel_pmu_lbr_init_core();
4527 x86_pmu.event_constraints = intel_core2_event_constraints;
4528 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
4529 pr_cont("Core2 events, ");
4533 case INTEL_FAM6_NEHALEM:
4534 case INTEL_FAM6_NEHALEM_EP:
4535 case INTEL_FAM6_NEHALEM_EX:
4536 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
4537 sizeof(hw_cache_event_ids));
4538 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4539 sizeof(hw_cache_extra_regs));
4541 intel_pmu_lbr_init_nhm();
4543 x86_pmu.event_constraints = intel_nehalem_event_constraints;
4544 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
4545 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4546 x86_pmu.extra_regs = intel_nehalem_extra_regs;
4548 mem_attr = nhm_mem_events_attrs;
4550 /* UOPS_ISSUED.STALLED_CYCLES */
4551 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4552 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4553 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4554 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4555 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
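/*
 * The .inv=1, .cmask=1 encodings above count cycles in which the event
 * fired fewer than cmask (1) times, i.e. cycles with no uops issued
 * (frontend) or executed (backend); this inverted counter-mask trick is
 * how the generic stalled-cycles events are derived.
 */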
4557 intel_pmu_pebs_data_source_nhm();
4558 x86_add_quirk(intel_nehalem_quirk);
4559 x86_pmu.pebs_no_tlb = 1;
4560 extra_attr = nhm_format_attr;
4562 pr_cont("Nehalem events, ");
4566 case INTEL_FAM6_ATOM_BONNELL:
4567 case INTEL_FAM6_ATOM_BONNELL_MID:
4568 case INTEL_FAM6_ATOM_SALTWELL:
4569 case INTEL_FAM6_ATOM_SALTWELL_MID:
4570 case INTEL_FAM6_ATOM_SALTWELL_TABLET:
4571 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
4572 sizeof(hw_cache_event_ids));
4574 intel_pmu_lbr_init_atom();
4576 x86_pmu.event_constraints = intel_gen_event_constraints;
4577 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
4578 x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
4579 pr_cont("Atom events, ");
4583 case INTEL_FAM6_ATOM_SILVERMONT:
4584 case INTEL_FAM6_ATOM_SILVERMONT_X:
4585 case INTEL_FAM6_ATOM_SILVERMONT_MID:
4586 case INTEL_FAM6_ATOM_AIRMONT:
4587 case INTEL_FAM6_ATOM_AIRMONT_MID:
4588 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
4589 sizeof(hw_cache_event_ids));
4590 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
4591 sizeof(hw_cache_extra_regs));
4593 intel_pmu_lbr_init_slm();
4595 x86_pmu.event_constraints = intel_slm_event_constraints;
4596 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
4597 x86_pmu.extra_regs = intel_slm_extra_regs;
4598 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4599 x86_pmu.cpu_events = slm_events_attrs;
4600 extra_attr = slm_format_attr;
4601 pr_cont("Silvermont events, ");
4602 name = "silvermont";
4605 case INTEL_FAM6_ATOM_GOLDMONT:
4606 case INTEL_FAM6_ATOM_GOLDMONT_X:
4607 x86_add_quirk(intel_counter_freezing_quirk);
4608 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
4609 sizeof(hw_cache_event_ids));
4610 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
4611 sizeof(hw_cache_extra_regs));
4613 intel_pmu_lbr_init_skl();
4615 x86_pmu.event_constraints = intel_slm_event_constraints;
4616 x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
4617 x86_pmu.extra_regs = intel_glm_extra_regs;
4619 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4620 * for precise cycles.
4621 * :pp is identical to :ppp
4623 x86_pmu.pebs_aliases = NULL;
4624 x86_pmu.pebs_prec_dist = true;
4625 x86_pmu.lbr_pt_coexist = true;
4626 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4627 x86_pmu.cpu_events = glm_events_attrs;
4628 extra_attr = slm_format_attr;
4629 pr_cont("Goldmont events, ");
4633 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
4634 x86_add_quirk(intel_counter_freezing_quirk);
4635 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
4636 sizeof(hw_cache_event_ids));
4637 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
4638 sizeof(hw_cache_extra_regs));
4640 intel_pmu_lbr_init_skl();
4642 x86_pmu.event_constraints = intel_slm_event_constraints;
4643 x86_pmu.extra_regs = intel_glm_extra_regs;
4645 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4646 * for precise cycles.
4648 x86_pmu.pebs_aliases = NULL;
4649 x86_pmu.pebs_prec_dist = true;
4650 x86_pmu.lbr_pt_coexist = true;
4651 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4652 x86_pmu.flags |= PMU_FL_PEBS_ALL;
4653 x86_pmu.get_event_constraints = glp_get_event_constraints;
4654 x86_pmu.cpu_events = glm_events_attrs;
4655 /* Goldmont Plus has 4-wide pipeline */
4656 event_attr_td_total_slots_scale_glm.event_str = "4";
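/*
 * The *_scale attribute is a multiplier the perf tool applies to the raw
 * count: topdown total slots = pipeline width * cycles, so the 4-wide
 * Goldmont Plus overrides the scale inherited from Goldmont with "4".
 */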
4657 extra_attr = slm_format_attr;
4658 pr_cont("Goldmont plus events, ");
4659 name = "goldmont_plus";
4662 case INTEL_FAM6_ATOM_TREMONT_X:
4663 x86_pmu.late_ack = true;
4664 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
4665 sizeof(hw_cache_event_ids));
4666 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
4667 sizeof(hw_cache_extra_regs));
4668 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
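/*
 * -1 marks the generic iTLB-load-accesses cache event as unsupported on
 * Tremont, so opening it fails instead of reporting bogus counts.
 */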
4670 intel_pmu_lbr_init_skl();
4672 x86_pmu.event_constraints = intel_slm_event_constraints;
4673 x86_pmu.extra_regs = intel_tnt_extra_regs;
4675 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4676 * for precise cycles.
4678 x86_pmu.pebs_aliases = NULL;
4679 x86_pmu.pebs_prec_dist = true;
4680 x86_pmu.lbr_pt_coexist = true;
4681 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4682 x86_pmu.get_event_constraints = tnt_get_event_constraints;
4683 extra_attr = slm_format_attr;
4684 pr_cont("Tremont events, ");
4688 case INTEL_FAM6_WESTMERE:
4689 case INTEL_FAM6_WESTMERE_EP:
4690 case INTEL_FAM6_WESTMERE_EX:
4691 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
4692 sizeof(hw_cache_event_ids));
4693 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4694 sizeof(hw_cache_extra_regs));
4696 intel_pmu_lbr_init_nhm();
4698 x86_pmu.event_constraints = intel_westmere_event_constraints;
4699 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4700 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
4701 x86_pmu.extra_regs = intel_westmere_extra_regs;
4702 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4704 mem_attr = nhm_mem_events_attrs;
4706 /* UOPS_ISSUED.STALLED_CYCLES */
4707 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4708 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4709 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4710 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4711 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
4713 intel_pmu_pebs_data_source_nhm();
4714 extra_attr = nhm_format_attr;
4715 pr_cont("Westmere events, ");
4719 case INTEL_FAM6_SANDYBRIDGE:
4720 case INTEL_FAM6_SANDYBRIDGE_X:
4721 x86_add_quirk(intel_sandybridge_quirk);
4722 x86_add_quirk(intel_ht_bug);
4723 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
4724 sizeof(hw_cache_event_ids));
4725 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
4726 sizeof(hw_cache_extra_regs));
4728 intel_pmu_lbr_init_snb();
4730 x86_pmu.event_constraints = intel_snb_event_constraints;
4731 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
4732 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
4733 if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
4734 x86_pmu.extra_regs = intel_snbep_extra_regs;
4736 x86_pmu.extra_regs = intel_snb_extra_regs;
4739 /* all extra regs are per-cpu when HT is on */
4740 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4741 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4743 x86_pmu.cpu_events = snb_events_attrs;
4744 mem_attr = snb_mem_events_attrs;
4746 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
4747 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4748 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4749 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
4750 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4751 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
4753 extra_attr = nhm_format_attr;
4755 pr_cont("SandyBridge events, ");
4756 name = "sandybridge";
4759 case INTEL_FAM6_IVYBRIDGE:
4760 case INTEL_FAM6_IVYBRIDGE_X:
4761 x86_add_quirk(intel_ht_bug);
4762 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
4763 sizeof(hw_cache_event_ids));
4764 /* dTLB-load-misses on IVB is different from SNB */
4765 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
4767 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
4768 sizeof(hw_cache_extra_regs));
4770 intel_pmu_lbr_init_snb();
4772 x86_pmu.event_constraints = intel_ivb_event_constraints;
4773 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
4774 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4775 x86_pmu.pebs_prec_dist = true;
4776 if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
4777 x86_pmu.extra_regs = intel_snbep_extra_regs;
4779 x86_pmu.extra_regs = intel_snb_extra_regs;
4780 /* all extra regs are per-cpu when HT is on */
4781 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4782 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4784 x86_pmu.cpu_events = snb_events_attrs;
4785 mem_attr = snb_mem_events_attrs;
4787 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
4788 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4789 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4791 extra_attr = nhm_format_attr;
4793 pr_cont("IvyBridge events, ");
4798 case INTEL_FAM6_HASWELL_CORE:
4799 case INTEL_FAM6_HASWELL_X:
4800 case INTEL_FAM6_HASWELL_ULT:
4801 case INTEL_FAM6_HASWELL_GT3E:
4802 x86_add_quirk(intel_ht_bug);
4803 x86_add_quirk(intel_pebs_isolation_quirk);
4804 x86_pmu.late_ack = true;
4805 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4806 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4808 intel_pmu_lbr_init_hsw();
4810 x86_pmu.event_constraints = intel_hsw_event_constraints;
4811 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
4812 x86_pmu.extra_regs = intel_snbep_extra_regs;
4813 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4814 x86_pmu.pebs_prec_dist = true;
4815 /* all extra regs are per-cpu when HT is on */
4816 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4817 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4819 x86_pmu.hw_config = hsw_hw_config;
4820 x86_pmu.get_event_constraints = hsw_get_event_constraints;
4821 x86_pmu.cpu_events = hsw_events_attrs;
4822 x86_pmu.lbr_double_abort = true;
4823 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4824 hsw_format_attr : nhm_format_attr;
4825 mem_attr = hsw_mem_events_attrs;
4826 tsx_attr = hsw_tsx_events_attrs;
4827 pr_cont("Haswell events, ");
4831 case INTEL_FAM6_BROADWELL_CORE:
4832 case INTEL_FAM6_BROADWELL_XEON_D:
4833 case INTEL_FAM6_BROADWELL_GT3E:
4834 case INTEL_FAM6_BROADWELL_X:
4835 x86_add_quirk(intel_pebs_isolation_quirk);
4836 x86_pmu.late_ack = true;
4837 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4838 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4840 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
4841 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
4842 BDW_L3_MISS|HSW_SNOOP_DRAM;
4843 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
4845 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
4846 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
4847 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
4848 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
4850 intel_pmu_lbr_init_hsw();
4852 x86_pmu.event_constraints = intel_bdw_event_constraints;
4853 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
4854 x86_pmu.extra_regs = intel_snbep_extra_regs;
4855 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4856 x86_pmu.pebs_prec_dist = true;
4857 /* all extra regs are per-cpu when HT is on */
4858 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4859 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4861 x86_pmu.hw_config = hsw_hw_config;
4862 x86_pmu.get_event_constraints = hsw_get_event_constraints;
4863 x86_pmu.cpu_events = hsw_events_attrs;
4864 x86_pmu.limit_period = bdw_limit_period;
4865 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4866 hsw_format_attr : nhm_format_attr;
4867 mem_attr = hsw_mem_events_attrs;
4868 tsx_attr = hsw_tsx_events_attrs;
4869 pr_cont("Broadwell events, ");
4873 case INTEL_FAM6_XEON_PHI_KNL:
4874 case INTEL_FAM6_XEON_PHI_KNM:
4875 memcpy(hw_cache_event_ids,
4876 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4877 memcpy(hw_cache_extra_regs,
4878 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4879 intel_pmu_lbr_init_knl();
4881 x86_pmu.event_constraints = intel_slm_event_constraints;
4882 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
4883 x86_pmu.extra_regs = intel_knl_extra_regs;
4885 /* all extra regs are per-cpu when HT is on */
4886 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4887 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4888 extra_attr = slm_format_attr;
4889 pr_cont("Knights Landing/Mill events, ");
4890 name = "knights-landing";
4893 case INTEL_FAM6_SKYLAKE_MOBILE:
4894 case INTEL_FAM6_SKYLAKE_DESKTOP:
4895 case INTEL_FAM6_SKYLAKE_X:
4896 case INTEL_FAM6_KABYLAKE_MOBILE:
4897 case INTEL_FAM6_KABYLAKE_DESKTOP:
4898 x86_add_quirk(intel_pebs_isolation_quirk);
4899 x86_pmu.late_ack = true;
4900 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4901 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4902 intel_pmu_lbr_init_skl();
4904 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
4905 event_attr_td_recovery_bubbles.event_str_noht =
4906 "event=0xd,umask=0x1,cmask=1";
4907 event_attr_td_recovery_bubbles.event_str_ht =
4908 "event=0xd,umask=0x1,cmask=1,any=1";
4910 x86_pmu.event_constraints = intel_skl_event_constraints;
4911 x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
4912 x86_pmu.extra_regs = intel_skl_extra_regs;
4913 x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
4914 x86_pmu.pebs_prec_dist = true;
4915 /* all extra regs are per-cpu when HT is on */
4916 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4917 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4919 x86_pmu.hw_config = hsw_hw_config;
4920 x86_pmu.get_event_constraints = hsw_get_event_constraints;
4921 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4922 hsw_format_attr : nhm_format_attr;
4923 extra_attr = merge_attr(extra_attr, skl_format_attr);
4924 to_free = extra_attr;
4925 x86_pmu.cpu_events = hsw_events_attrs;
4926 mem_attr = hsw_mem_events_attrs;
4927 tsx_attr = hsw_tsx_events_attrs;
4928 intel_pmu_pebs_data_source_skl(
4929 boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
4931 if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
4932 x86_pmu.flags |= PMU_FL_TFA;
4933 x86_pmu.get_event_constraints = tfa_get_event_constraints;
4934 x86_pmu.enable_all = intel_tfa_pmu_enable_all;
4935 x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
4936 intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr;
4939 pr_cont("Skylake events, ");
4943 case INTEL_FAM6_ICELAKE_MOBILE:
4944 x86_pmu.late_ack = true;
4945 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4946 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4947 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
4948 intel_pmu_lbr_init_skl();
4950 x86_pmu.event_constraints = intel_icl_event_constraints;
4951 x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
4952 x86_pmu.extra_regs = intel_icl_extra_regs;
4953 x86_pmu.pebs_aliases = NULL;
4954 x86_pmu.pebs_prec_dist = true;
4955 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4956 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4958 x86_pmu.hw_config = hsw_hw_config;
4959 x86_pmu.get_event_constraints = icl_get_event_constraints;
4960 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4961 hsw_format_attr : nhm_format_attr;
4962 extra_attr = merge_attr(extra_attr, skl_format_attr);
4963 x86_pmu.cpu_events = get_icl_events_attrs();
4964 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02);
4965 x86_pmu.lbr_pt_coexist = true;
4966 intel_pmu_pebs_data_source_skl(false);
4967 pr_cont("Icelake events, ");
4972 switch (x86_pmu.version) {
4974 x86_pmu.event_constraints = intel_v1_event_constraints;
4975 pr_cont("generic architected perfmon v1, ");
4976 name = "generic_arch_v1";
4980 * default constraints for v2 and up
4982 x86_pmu.event_constraints = intel_gen_event_constraints;
4983 pr_cont("generic architected perfmon, ");
4984 name = "generic_arch_v2+";
4989 snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
4991 if (version >= 2 && extra_attr) {
4992 x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
4994 WARN_ON(!x86_pmu.format_attrs);
4997 x86_pmu.cpu_events = get_events_attrs(x86_pmu.cpu_events,
4998 mem_attr, tsx_attr);
5000 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
5001 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
5002 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
5003 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
5005 x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
5007 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
5008 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
5009 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
5010 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
5013 x86_pmu.intel_ctrl |=
5014 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
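/*
 * intel_ctrl now has one bit per usable counter, with the fixed counters
 * starting at bit INTEL_PMC_IDX_FIXED (32). For example, 4 generic plus
 * 3 fixed counters yields 0x70000000f.
 */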
5016 if (x86_pmu.event_constraints) {
5018 * event on fixed counter2 (REF_CYCLES) only works on this
5019 * counter, so do not extend mask to generic counters
5021 for_each_event_constraint(c, x86_pmu.event_constraints) {
5022 if (c->cmask == FIXED_EVENT_FLAGS
5023 && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
5024 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
5027 ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
5028 c->weight = hweight64(c->idxmsk64);
5033 * Accessing the LBR MSRs may cause a #GP under certain circumstances,
5034 * e.g. KVM doesn't support the LBR MSRs.
5035 * Check all LBR MSRs here and disable LBR access if any of them
5036 * cannot be accessed.
5038 if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
5040 for (i = 0; i < x86_pmu.lbr_nr; i++) {
5041 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
5042 check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
5046 x86_pmu.caps_attrs = intel_pmu_caps_attrs;
5048 if (x86_pmu.lbr_nr) {
5049 x86_pmu.caps_attrs = merge_attr(x86_pmu.caps_attrs, lbr_attrs);
5050 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
5054 * Accessing the extra MSRs may cause a #GP under certain circumstances,
5055 * e.g. KVM doesn't support the offcore event MSRs.
5056 * Check all extra_regs here.
5058 if (x86_pmu.extra_regs) {
5059 for (er = x86_pmu.extra_regs; er->msr; er++) {
5060 er->extra_msr_access = check_msr(er->msr, 0x11UL);
5061 /* Disable LBR select mapping */
5062 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
5063 x86_pmu.lbr_sel_map = NULL;
5067 /* Support full width counters using alternative MSR range */
5068 if (x86_pmu.intel_cap.full_width_write) {
5069 x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
5070 x86_pmu.perfctr = MSR_IA32_PMC0;
5071 pr_cont("full-width counters, ");
5075 * For arch perfmon v4, use counter freezing to avoid
5076 * several MSR accesses in the PMI handler.
5078 if (x86_pmu.counter_freezing)
5079 x86_pmu.handle_irq = intel_pmu_handle_irq_v4;
5086 * HT bug: phase 2 init
5087 * Called once we have valid topology information to check
5088 * whether or not HT is enabled
5089 * If HT is off, then we disable the workaround
5091 static __init int fixup_ht_bug(void)
5095 * problem not present on this CPU model, nothing to do
5097 if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
5100 if (topology_max_smt_threads() > 1) {
5101 pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
5107 hardlockup_detector_perf_stop();
5109 x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
5111 x86_pmu.start_scheduling = NULL;
5112 x86_pmu.commit_scheduling = NULL;
5113 x86_pmu.stop_scheduling = NULL;
5115 hardlockup_detector_perf_restart();
5117 for_each_online_cpu(c)
5118 free_excl_cntrs(&per_cpu(cpu_hw_events, c));
5121 pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
5124 subsys_initcall(fixup_ht_bug);