4 * Used to coordinate shared registers between HT threads or
5 * among events on a single PMU.
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/stddef.h>
11 #include <linux/types.h>
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/nmi.h>
17 #include <asm/cpufeature.h>
18 #include <asm/hardirq.h>
19 #include <asm/intel-family.h>
21 #include <asm/cpu_device_id.h>
23 #include "../perf_event.h"
26 * Intel PerfMon, used on Core and later.
28 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
30 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
31 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
32 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
33 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
34 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
35 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
36 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
37 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
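/*
 * Illustrative decode (hypothetical helpers, not part of this file):
 * each map entry above packs the unit mask into bits 15:8 and the
 * event select into bits 7:0. E.g. PERF_COUNT_HW_CACHE_MISSES = 0x412e
 * is umask 0x41 on event 0x2e (LONGEST_LAT_CACHE.MISS).
 */
static inline u8 example_eventsel(u64 config)
{
	return config & 0xff;		/* bits 7:0: event select */
}

static inline u8 example_umask(u64 config)
{
	return (config >> 8) & 0xff;	/* bits 15:8: unit mask */
}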
40 static struct event_constraint intel_core_event_constraints[] __read_mostly =
42 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
43 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
44 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
45 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
46 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
47 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
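/*
 * For illustration (hypothetical helper): the second argument of
 * INTEL_EVENT_CONSTRAINT() is a counter bitmask, so the entry above for
 * event 0x14 (CYCLES_DIV_BUSY) with mask 0x1 pins it to PMC0, while
 * mask 0x2 pins an event to PMC1. A scheduler-style check:
 */
static inline bool example_counter_allowed(u64 cntr_mask, int counter)
{
	return cntr_mask & (1ULL << counter);	/* e.g. 0x2 allows only PMC1 */
}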
51 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
53 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
54 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
55 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
56 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
57 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
58 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
59 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
60 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
61 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
62 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
63 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
64 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
65 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
69 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
71 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
72 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
73 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
74 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
75 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
76 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
77 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
78 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
79 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
80 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
81 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
85 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
87 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
88 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
89 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
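/*
 * Usage sketch (perf ABI fields; the config1 value is illustrative and
 * the variable name is hypothetical): an OFFCORE_RESPONSE event is
 * opened with config = 0x01b7 (event 0xb7, umask 0x01) and the
 * response-type bits in config1; the driver routes config1 into
 * MSR_OFFCORE_RSP_0, limited by the 0xffff valid-bits mask above.
 */
static struct perf_event_attr example_offcore_attr = {
	.type	 = PERF_TYPE_RAW,
	.config	 = 0x01b7,	/* OFFCORE_RESPONSE_0 */
	.config1 = 0x0101,	/* NHM_DMND_DATA_RD|NHM_UNCORE_HIT, defined later in this file */
};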
93 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
95 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
96 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
97 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
98 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
99 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
100 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
101 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
105 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
107 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
108 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
109 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
110 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
111 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
112 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
113 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
114 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
115 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
116 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
117 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
118 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
121 * When HT is off these events can only run on the bottom 4 counters
122 * When HT is on, they are impacted by the HT bug and require EXCL access
124 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
125 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
126 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
127 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
132 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
134 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
135 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
136 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
137 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
138 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
139 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
140 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
141 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
142 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
143 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
144 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
145 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
146 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
149 * When HT is off these events can only run on the bottom 4 counters
150 * When HT is on, they are impacted by the HT bug and require EXCL access
152 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
153 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
154 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
155 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
160 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
162 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
163 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
164 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
165 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
169 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
174 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
176 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
177 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
178 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
182 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
184 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
185 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
186 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
190 static struct event_constraint intel_skl_event_constraints[] = {
191 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
192 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
193 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
194 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
197 * when HT is off, these can only run on the bottom 4 counters
199 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
200 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
201 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
202 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
203 INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
208 static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
209 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
210 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
214 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
215 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
216 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
217 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
218 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
222 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
223 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
224 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
225 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
226 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
230 static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
231 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
232 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
233 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
235 * Note: the low 8 bits of the eventsel code are not a contiguous field;
236 * some bit combinations cause a #GP. Those bits are masked out.
238 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
242 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
243 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
244 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
246 static struct attribute *nhm_mem_events_attrs[] = {
247 EVENT_PTR(mem_ld_nhm),
252 * topdown events for Intel Core CPUs.
254 * The events are all measured in slots; a slot is a free issue slot in a
255 * 4-wide pipeline. Some events are already reported in slots; cycle
256 * events are multiplied by the pipeline width (4).
258 * With Hyper-Threading on, topdown metrics are either summed or averaged
259 * between the threads of a core: (count_t0 + count_t1).
261 * For the averaged case the metric is still scaled to the pipeline width,
262 * so we use a factor of 2: (count_t0 + count_t1) / 2 * 4.
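/*
 * Worked example of the scaling above (illustrative numbers): with HT
 * off, 1000 cycles * scale 4 = 4000 total slots. With HT on, the tool
 * sums both threads' counts; the average (sum / 2) times the pipeline
 * width 4 equals sum * 2, which is why the .scale attribute below is
 * "4" for the non-HT case and "2" for the HT case.
 */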
265 EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
266 "event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */
267 "event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */
268 EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
269 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
270 "event=0xe,umask=0x1"); /* uops_issued.any */
271 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
272 "event=0xc2,umask=0x2"); /* uops_retired.retire_slots */
273 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
274 "event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */
275 EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
276 "event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */
277 "event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */
278 EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
279 "4", "2");
281 static struct attribute *snb_events_attrs[] = {
282 EVENT_PTR(td_slots_issued),
283 EVENT_PTR(td_slots_retired),
284 EVENT_PTR(td_fetch_bubbles),
285 EVENT_PTR(td_total_slots),
286 EVENT_PTR(td_total_slots_scale),
287 EVENT_PTR(td_recovery_bubbles),
288 EVENT_PTR(td_recovery_bubbles_scale),
292 static struct attribute *snb_mem_events_attrs[] = {
293 EVENT_PTR(mem_ld_snb),
294 EVENT_PTR(mem_st_snb),
298 static struct event_constraint intel_hsw_event_constraints[] = {
299 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
300 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
301 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
302 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
303 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
304 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
305 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
306 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
307 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
308 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
309 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
310 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
313 * When HT is off these events can only run on the bottom 4 counters
314 * When HT is on, they are impacted by the HT bug and require EXCL access
316 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
317 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
318 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
319 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
324 static struct event_constraint intel_bdw_event_constraints[] = {
325 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
326 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
327 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
328 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
329 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
331 * when HT is off, these can only run on the bottom 4 counters
333 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
334 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
335 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
336 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
340 static u64 intel_pmu_event_map(int hw_event)
342 return intel_perfmon_event_map[hw_event];
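/*
 * Usage sketch (hypothetical wrapper; simplified from the generic x86
 * event setup path): a generic hardware event id indexes the table
 * above to obtain the raw EVENTSEL encoding.
 */
static u64 example_config_for(int hw_event)
{
	if (hw_event >= PERF_COUNT_HW_MAX)	/* caller must validate */
		return 0;
	return intel_pmu_event_map(hw_event);	/* e.g. CPU_CYCLES -> 0x003c */
}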
346 * Notes on the events:
347 * - data reads do not include code reads (comparable to earlier tables)
348 * - data counts include speculative execution (except L1 write, dtlb, bpu)
349 * - remote node access includes remote memory, remote cache, remote mmio.
350 * - prefetches are not included in the counts.
351 * - icache miss does not include decoded icache
354 #define SKL_DEMAND_DATA_RD BIT_ULL(0)
355 #define SKL_DEMAND_RFO BIT_ULL(1)
356 #define SKL_ANY_RESPONSE BIT_ULL(16)
357 #define SKL_SUPPLIER_NONE BIT_ULL(17)
358 #define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26)
359 #define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27)
360 #define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28)
361 #define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29)
362 #define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \
363 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
364 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
365 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
366 #define SKL_SPL_HIT BIT_ULL(30)
367 #define SKL_SNOOP_NONE BIT_ULL(31)
368 #define SKL_SNOOP_NOT_NEEDED BIT_ULL(32)
369 #define SKL_SNOOP_MISS BIT_ULL(33)
370 #define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34)
371 #define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35)
372 #define SKL_SNOOP_HITM BIT_ULL(36)
373 #define SKL_SNOOP_NON_DRAM BIT_ULL(37)
374 #define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \
375 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
376 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
377 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
378 #define SKL_DEMAND_READ SKL_DEMAND_DATA_RD
379 #define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \
380 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
381 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
382 SKL_SNOOP_HITM|SKL_SPL_HIT)
383 #define SKL_DEMAND_WRITE SKL_DEMAND_RFO
384 #define SKL_LLC_ACCESS SKL_ANY_RESPONSE
385 #define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
386 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
387 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
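/*
 * Example composition (hypothetical name; the value follows directly
 * from the definitions above): the demand-read LLC-miss encoding used
 * in the extra-regs table further below -- a demand data read supplied
 * by DRAM at any hop, with any snoop outcome:
 */
static const u64 example_skl_read_l3_miss =
	SKL_DEMAND_READ|SKL_L3_MISS|SKL_ANY_SNOOP|SKL_SUPPLIER_NONE;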
389 static __initconst const u64 skl_hw_cache_event_ids
390 [PERF_COUNT_HW_CACHE_MAX]
391 [PERF_COUNT_HW_CACHE_OP_MAX]
392 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
396 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
397 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
400 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
401 [ C(RESULT_MISS) ] = 0x0,
403 [ C(OP_PREFETCH) ] = {
404 [ C(RESULT_ACCESS) ] = 0x0,
405 [ C(RESULT_MISS) ] = 0x0,
410 [ C(RESULT_ACCESS) ] = 0x0,
411 [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
414 [ C(RESULT_ACCESS) ] = -1,
415 [ C(RESULT_MISS) ] = -1,
417 [ C(OP_PREFETCH) ] = {
418 [ C(RESULT_ACCESS) ] = 0x0,
419 [ C(RESULT_MISS) ] = 0x0,
424 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
425 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
428 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
429 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
431 [ C(OP_PREFETCH) ] = {
432 [ C(RESULT_ACCESS) ] = 0x0,
433 [ C(RESULT_MISS) ] = 0x0,
438 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
439 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
442 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
443 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
445 [ C(OP_PREFETCH) ] = {
446 [ C(RESULT_ACCESS) ] = 0x0,
447 [ C(RESULT_MISS) ] = 0x0,
452 [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
453 [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
456 [ C(RESULT_ACCESS) ] = -1,
457 [ C(RESULT_MISS) ] = -1,
459 [ C(OP_PREFETCH) ] = {
460 [ C(RESULT_ACCESS) ] = -1,
461 [ C(RESULT_MISS) ] = -1,
466 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
467 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
470 [ C(RESULT_ACCESS) ] = -1,
471 [ C(RESULT_MISS) ] = -1,
473 [ C(OP_PREFETCH) ] = {
474 [ C(RESULT_ACCESS) ] = -1,
475 [ C(RESULT_MISS) ] = -1,
480 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
481 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
484 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
485 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
487 [ C(OP_PREFETCH) ] = {
488 [ C(RESULT_ACCESS) ] = 0x0,
489 [ C(RESULT_MISS) ] = 0x0,
494 static __initconst const u64 skl_hw_cache_extra_regs
495 [PERF_COUNT_HW_CACHE_MAX]
496 [PERF_COUNT_HW_CACHE_OP_MAX]
497 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
501 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
502 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
503 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
504 SKL_L3_MISS|SKL_ANY_SNOOP|
505 SKL_SUPPLIER_NONE,
508 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
509 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
510 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
511 SKL_L3_MISS|SKL_ANY_SNOOP|
512 SKL_SUPPLIER_NONE,
514 [ C(OP_PREFETCH) ] = {
515 [ C(RESULT_ACCESS) ] = 0x0,
516 [ C(RESULT_MISS) ] = 0x0,
521 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
522 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
523 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
524 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
527 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
528 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
529 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
530 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
532 [ C(OP_PREFETCH) ] = {
533 [ C(RESULT_ACCESS) ] = 0x0,
534 [ C(RESULT_MISS) ] = 0x0,
539 #define SNB_DMND_DATA_RD (1ULL << 0)
540 #define SNB_DMND_RFO (1ULL << 1)
541 #define SNB_DMND_IFETCH (1ULL << 2)
542 #define SNB_DMND_WB (1ULL << 3)
543 #define SNB_PF_DATA_RD (1ULL << 4)
544 #define SNB_PF_RFO (1ULL << 5)
545 #define SNB_PF_IFETCH (1ULL << 6)
546 #define SNB_LLC_DATA_RD (1ULL << 7)
547 #define SNB_LLC_RFO (1ULL << 8)
548 #define SNB_LLC_IFETCH (1ULL << 9)
549 #define SNB_BUS_LOCKS (1ULL << 10)
550 #define SNB_STRM_ST (1ULL << 11)
551 #define SNB_OTHER (1ULL << 15)
552 #define SNB_RESP_ANY (1ULL << 16)
553 #define SNB_NO_SUPP (1ULL << 17)
554 #define SNB_LLC_HITM (1ULL << 18)
555 #define SNB_LLC_HITE (1ULL << 19)
556 #define SNB_LLC_HITS (1ULL << 20)
557 #define SNB_LLC_HITF (1ULL << 21)
558 #define SNB_LOCAL (1ULL << 22)
559 #define SNB_REMOTE (0xffULL << 23)
560 #define SNB_SNP_NONE (1ULL << 31)
561 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
562 #define SNB_SNP_MISS (1ULL << 33)
563 #define SNB_NO_FWD (1ULL << 34)
564 #define SNB_SNP_FWD (1ULL << 35)
565 #define SNB_HITM (1ULL << 36)
566 #define SNB_NON_DRAM (1ULL << 37)
568 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
569 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
570 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
572 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
573 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
574 SNB_HITM)
576 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
577 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
579 #define SNB_L3_ACCESS SNB_RESP_ANY
580 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
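/*
 * Example composition (hypothetical names): the demand-read L3
 * access/miss pair used in the table below, built from the masks above.
 */
static const u64 example_snb_read_l3_access = SNB_DMND_READ|SNB_L3_ACCESS;
static const u64 example_snb_read_l3_miss   = SNB_DMND_READ|SNB_L3_MISS;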
582 static __initconst const u64 snb_hw_cache_extra_regs
583 [PERF_COUNT_HW_CACHE_MAX]
584 [PERF_COUNT_HW_CACHE_OP_MAX]
585 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
589 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
590 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
593 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
594 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
596 [ C(OP_PREFETCH) ] = {
597 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
598 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
603 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
604 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
607 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
608 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
610 [ C(OP_PREFETCH) ] = {
611 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
612 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
617 static __initconst const u64 snb_hw_cache_event_ids
618 [PERF_COUNT_HW_CACHE_MAX]
619 [PERF_COUNT_HW_CACHE_OP_MAX]
620 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
624 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
625 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
628 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
629 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
631 [ C(OP_PREFETCH) ] = {
632 [ C(RESULT_ACCESS) ] = 0x0,
633 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
638 [ C(RESULT_ACCESS) ] = 0x0,
639 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
642 [ C(RESULT_ACCESS) ] = -1,
643 [ C(RESULT_MISS) ] = -1,
645 [ C(OP_PREFETCH) ] = {
646 [ C(RESULT_ACCESS) ] = 0x0,
647 [ C(RESULT_MISS) ] = 0x0,
652 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
653 [ C(RESULT_ACCESS) ] = 0x01b7,
654 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
655 [ C(RESULT_MISS) ] = 0x01b7,
658 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
659 [ C(RESULT_ACCESS) ] = 0x01b7,
660 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
661 [ C(RESULT_MISS) ] = 0x01b7,
663 [ C(OP_PREFETCH) ] = {
664 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
665 [ C(RESULT_ACCESS) ] = 0x01b7,
666 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
667 [ C(RESULT_MISS) ] = 0x01b7,
672 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
673 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
676 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
677 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
679 [ C(OP_PREFETCH) ] = {
680 [ C(RESULT_ACCESS) ] = 0x0,
681 [ C(RESULT_MISS) ] = 0x0,
686 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
687 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
690 [ C(RESULT_ACCESS) ] = -1,
691 [ C(RESULT_MISS) ] = -1,
693 [ C(OP_PREFETCH) ] = {
694 [ C(RESULT_ACCESS) ] = -1,
695 [ C(RESULT_MISS) ] = -1,
700 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
701 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
704 [ C(RESULT_ACCESS) ] = -1,
705 [ C(RESULT_MISS) ] = -1,
707 [ C(OP_PREFETCH) ] = {
708 [ C(RESULT_ACCESS) ] = -1,
709 [ C(RESULT_MISS) ] = -1,
714 [ C(RESULT_ACCESS) ] = 0x01b7,
715 [ C(RESULT_MISS) ] = 0x01b7,
718 [ C(RESULT_ACCESS) ] = 0x01b7,
719 [ C(RESULT_MISS) ] = 0x01b7,
721 [ C(OP_PREFETCH) ] = {
722 [ C(RESULT_ACCESS) ] = 0x01b7,
723 [ C(RESULT_MISS) ] = 0x01b7,
730 * Notes on the events:
731 * - data reads do not include code reads (comparable to earlier tables)
732 * - data counts include speculative execution (except L1 write, dtlb, bpu)
733 * - remote node access includes remote memory, remote cache, remote mmio.
734 * - prefetches are not included in the counts because they are not
735 * reliably counted.
738 #define HSW_DEMAND_DATA_RD BIT_ULL(0)
739 #define HSW_DEMAND_RFO BIT_ULL(1)
740 #define HSW_ANY_RESPONSE BIT_ULL(16)
741 #define HSW_SUPPLIER_NONE BIT_ULL(17)
742 #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
743 #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
744 #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
745 #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
746 #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
747 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
748 HSW_L3_MISS_REMOTE_HOP2P)
749 #define HSW_SNOOP_NONE BIT_ULL(31)
750 #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
751 #define HSW_SNOOP_MISS BIT_ULL(33)
752 #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
753 #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
754 #define HSW_SNOOP_HITM BIT_ULL(36)
755 #define HSW_SNOOP_NON_DRAM BIT_ULL(37)
756 #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
757 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
758 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
759 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
760 #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
761 #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
762 #define HSW_DEMAND_WRITE HSW_DEMAND_RFO
763 #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
764 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
765 #define HSW_LLC_ACCESS HSW_ANY_RESPONSE
767 #define BDW_L3_MISS_LOCAL BIT(26)
768 #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
769 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
770 HSW_L3_MISS_REMOTE_HOP2P)
773 static __initconst const u64 hsw_hw_cache_event_ids
774 [PERF_COUNT_HW_CACHE_MAX]
775 [PERF_COUNT_HW_CACHE_OP_MAX]
776 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
780 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
781 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
784 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
785 [ C(RESULT_MISS) ] = 0x0,
787 [ C(OP_PREFETCH) ] = {
788 [ C(RESULT_ACCESS) ] = 0x0,
789 [ C(RESULT_MISS) ] = 0x0,
794 [ C(RESULT_ACCESS) ] = 0x0,
795 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
798 [ C(RESULT_ACCESS) ] = -1,
799 [ C(RESULT_MISS) ] = -1,
801 [ C(OP_PREFETCH) ] = {
802 [ C(RESULT_ACCESS) ] = 0x0,
803 [ C(RESULT_MISS) ] = 0x0,
808 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
809 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
812 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
813 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
815 [ C(OP_PREFETCH) ] = {
816 [ C(RESULT_ACCESS) ] = 0x0,
817 [ C(RESULT_MISS) ] = 0x0,
822 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
823 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
826 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
827 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
829 [ C(OP_PREFETCH) ] = {
830 [ C(RESULT_ACCESS) ] = 0x0,
831 [ C(RESULT_MISS) ] = 0x0,
836 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
837 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
840 [ C(RESULT_ACCESS) ] = -1,
841 [ C(RESULT_MISS) ] = -1,
843 [ C(OP_PREFETCH) ] = {
844 [ C(RESULT_ACCESS) ] = -1,
845 [ C(RESULT_MISS) ] = -1,
850 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
851 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
854 [ C(RESULT_ACCESS) ] = -1,
855 [ C(RESULT_MISS) ] = -1,
857 [ C(OP_PREFETCH) ] = {
858 [ C(RESULT_ACCESS) ] = -1,
859 [ C(RESULT_MISS) ] = -1,
864 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
865 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
868 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
869 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
871 [ C(OP_PREFETCH) ] = {
872 [ C(RESULT_ACCESS) ] = 0x0,
873 [ C(RESULT_MISS) ] = 0x0,
878 static __initconst const u64 hsw_hw_cache_extra_regs
879 [PERF_COUNT_HW_CACHE_MAX]
880 [PERF_COUNT_HW_CACHE_OP_MAX]
881 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
885 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
886 HSW_LLC_ACCESS,
887 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
888 HSW_L3_MISS|HSW_ANY_SNOOP,
891 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
892 HSW_LLC_ACCESS,
893 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
894 HSW_L3_MISS|HSW_ANY_SNOOP,
896 [ C(OP_PREFETCH) ] = {
897 [ C(RESULT_ACCESS) ] = 0x0,
898 [ C(RESULT_MISS) ] = 0x0,
903 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
904 HSW_L3_MISS_LOCAL_DRAM|
905 HSW_SNOOP_DRAM,
906 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
907 HSW_L3_MISS_REMOTE|
908 HSW_SNOOP_DRAM,
911 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
912 HSW_L3_MISS_LOCAL_DRAM|
913 HSW_SNOOP_DRAM,
914 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
915 HSW_L3_MISS_REMOTE|
916 HSW_SNOOP_DRAM,
918 [ C(OP_PREFETCH) ] = {
919 [ C(RESULT_ACCESS) ] = 0x0,
920 [ C(RESULT_MISS) ] = 0x0,
925 static __initconst const u64 westmere_hw_cache_event_ids
926 [PERF_COUNT_HW_CACHE_MAX]
927 [PERF_COUNT_HW_CACHE_OP_MAX]
928 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
932 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
933 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
936 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
937 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
939 [ C(OP_PREFETCH) ] = {
940 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
941 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
946 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
947 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
950 [ C(RESULT_ACCESS) ] = -1,
951 [ C(RESULT_MISS) ] = -1,
953 [ C(OP_PREFETCH) ] = {
954 [ C(RESULT_ACCESS) ] = 0x0,
955 [ C(RESULT_MISS) ] = 0x0,
960 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
961 [ C(RESULT_ACCESS) ] = 0x01b7,
962 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
963 [ C(RESULT_MISS) ] = 0x01b7,
966 * Use RFO, not WRITEBACK, because a write miss would typically occur
967 * on RFO.
970 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
971 [ C(RESULT_ACCESS) ] = 0x01b7,
972 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
973 [ C(RESULT_MISS) ] = 0x01b7,
975 [ C(OP_PREFETCH) ] = {
976 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
977 [ C(RESULT_ACCESS) ] = 0x01b7,
978 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
979 [ C(RESULT_MISS) ] = 0x01b7,
984 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
985 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
988 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
989 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
991 [ C(OP_PREFETCH) ] = {
992 [ C(RESULT_ACCESS) ] = 0x0,
993 [ C(RESULT_MISS) ] = 0x0,
998 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
999 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
1002 [ C(RESULT_ACCESS) ] = -1,
1003 [ C(RESULT_MISS) ] = -1,
1005 [ C(OP_PREFETCH) ] = {
1006 [ C(RESULT_ACCESS) ] = -1,
1007 [ C(RESULT_MISS) ] = -1,
1012 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1013 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1016 [ C(RESULT_ACCESS) ] = -1,
1017 [ C(RESULT_MISS) ] = -1,
1019 [ C(OP_PREFETCH) ] = {
1020 [ C(RESULT_ACCESS) ] = -1,
1021 [ C(RESULT_MISS) ] = -1,
1026 [ C(RESULT_ACCESS) ] = 0x01b7,
1027 [ C(RESULT_MISS) ] = 0x01b7,
1030 [ C(RESULT_ACCESS) ] = 0x01b7,
1031 [ C(RESULT_MISS) ] = 0x01b7,
1033 [ C(OP_PREFETCH) ] = {
1034 [ C(RESULT_ACCESS) ] = 0x01b7,
1035 [ C(RESULT_MISS) ] = 0x01b7,
1041 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1042 * See IA32 SDM Vol 3B 30.6.1.3
1045 #define NHM_DMND_DATA_RD (1 << 0)
1046 #define NHM_DMND_RFO (1 << 1)
1047 #define NHM_DMND_IFETCH (1 << 2)
1048 #define NHM_DMND_WB (1 << 3)
1049 #define NHM_PF_DATA_RD (1 << 4)
1050 #define NHM_PF_DATA_RFO (1 << 5)
1051 #define NHM_PF_IFETCH (1 << 6)
1052 #define NHM_OFFCORE_OTHER (1 << 7)
1053 #define NHM_UNCORE_HIT (1 << 8)
1054 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
1055 #define NHM_OTHER_CORE_HITM (1 << 10)
1057 #define NHM_REMOTE_CACHE_FWD (1 << 12)
1058 #define NHM_REMOTE_DRAM (1 << 13)
1059 #define NHM_LOCAL_DRAM (1 << 14)
1060 #define NHM_NON_DRAM (1 << 15)
1062 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1063 #define NHM_REMOTE (NHM_REMOTE_DRAM)
1065 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
1066 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
1067 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1069 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1070 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1071 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
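/*
 * Example composition (hypothetical name): a demand-read L3 miss on
 * Nehalem/Westmere is the demand-read request bits qualified by any of
 * the miss suppliers defined above:
 */
static const u64 example_nhm_read_l3_miss = NHM_DMND_READ|NHM_L3_MISS;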
1073 static __initconst const u64 nehalem_hw_cache_extra_regs
1074 [PERF_COUNT_HW_CACHE_MAX]
1075 [PERF_COUNT_HW_CACHE_OP_MAX]
1076 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1080 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1081 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
1084 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1085 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
1087 [ C(OP_PREFETCH) ] = {
1088 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1089 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1094 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1095 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
1098 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1099 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
1101 [ C(OP_PREFETCH) ] = {
1102 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1103 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1108 static __initconst const u64 nehalem_hw_cache_event_ids
1109 [PERF_COUNT_HW_CACHE_MAX]
1110 [PERF_COUNT_HW_CACHE_OP_MAX]
1111 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1115 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1116 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1119 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1120 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1122 [ C(OP_PREFETCH) ] = {
1123 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1124 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1129 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1130 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1133 [ C(RESULT_ACCESS) ] = -1,
1134 [ C(RESULT_MISS) ] = -1,
1136 [ C(OP_PREFETCH) ] = {
1137 [ C(RESULT_ACCESS) ] = 0x0,
1138 [ C(RESULT_MISS) ] = 0x0,
1143 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1144 [ C(RESULT_ACCESS) ] = 0x01b7,
1145 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1146 [ C(RESULT_MISS) ] = 0x01b7,
1149 * Use RFO, not WRITEBACK, because a write miss would typically occur
1150 * on RFO.
1153 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1154 [ C(RESULT_ACCESS) ] = 0x01b7,
1155 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1156 [ C(RESULT_MISS) ] = 0x01b7,
1158 [ C(OP_PREFETCH) ] = {
1159 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1160 [ C(RESULT_ACCESS) ] = 0x01b7,
1161 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1162 [ C(RESULT_MISS) ] = 0x01b7,
1167 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1168 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1171 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1172 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1174 [ C(OP_PREFETCH) ] = {
1175 [ C(RESULT_ACCESS) ] = 0x0,
1176 [ C(RESULT_MISS) ] = 0x0,
1181 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1182 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
1185 [ C(RESULT_ACCESS) ] = -1,
1186 [ C(RESULT_MISS) ] = -1,
1188 [ C(OP_PREFETCH) ] = {
1189 [ C(RESULT_ACCESS) ] = -1,
1190 [ C(RESULT_MISS) ] = -1,
1195 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1196 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1199 [ C(RESULT_ACCESS) ] = -1,
1200 [ C(RESULT_MISS) ] = -1,
1202 [ C(OP_PREFETCH) ] = {
1203 [ C(RESULT_ACCESS) ] = -1,
1204 [ C(RESULT_MISS) ] = -1,
1209 [ C(RESULT_ACCESS) ] = 0x01b7,
1210 [ C(RESULT_MISS) ] = 0x01b7,
1213 [ C(RESULT_ACCESS) ] = 0x01b7,
1214 [ C(RESULT_MISS) ] = 0x01b7,
1216 [ C(OP_PREFETCH) ] = {
1217 [ C(RESULT_ACCESS) ] = 0x01b7,
1218 [ C(RESULT_MISS) ] = 0x01b7,
1223 static __initconst const u64 core2_hw_cache_event_ids
1224 [PERF_COUNT_HW_CACHE_MAX]
1225 [PERF_COUNT_HW_CACHE_OP_MAX]
1226 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1230 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
1231 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
1234 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
1235 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
1237 [ C(OP_PREFETCH) ] = {
1238 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
1239 [ C(RESULT_MISS) ] = 0,
1244 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
1245 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
1248 [ C(RESULT_ACCESS) ] = -1,
1249 [ C(RESULT_MISS) ] = -1,
1251 [ C(OP_PREFETCH) ] = {
1252 [ C(RESULT_ACCESS) ] = 0,
1253 [ C(RESULT_MISS) ] = 0,
1258 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1259 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1262 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1263 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1265 [ C(OP_PREFETCH) ] = {
1266 [ C(RESULT_ACCESS) ] = 0,
1267 [ C(RESULT_MISS) ] = 0,
1272 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1273 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
1276 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1277 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
1279 [ C(OP_PREFETCH) ] = {
1280 [ C(RESULT_ACCESS) ] = 0,
1281 [ C(RESULT_MISS) ] = 0,
1286 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1287 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
1290 [ C(RESULT_ACCESS) ] = -1,
1291 [ C(RESULT_MISS) ] = -1,
1293 [ C(OP_PREFETCH) ] = {
1294 [ C(RESULT_ACCESS) ] = -1,
1295 [ C(RESULT_MISS) ] = -1,
1300 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1301 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1304 [ C(RESULT_ACCESS) ] = -1,
1305 [ C(RESULT_MISS) ] = -1,
1307 [ C(OP_PREFETCH) ] = {
1308 [ C(RESULT_ACCESS) ] = -1,
1309 [ C(RESULT_MISS) ] = -1,
1314 static __initconst const u64 atom_hw_cache_event_ids
1315 [PERF_COUNT_HW_CACHE_MAX]
1316 [PERF_COUNT_HW_CACHE_OP_MAX]
1317 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1321 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
1322 [ C(RESULT_MISS) ] = 0,
1325 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
1326 [ C(RESULT_MISS) ] = 0,
1328 [ C(OP_PREFETCH) ] = {
1329 [ C(RESULT_ACCESS) ] = 0x0,
1330 [ C(RESULT_MISS) ] = 0,
1335 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1336 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1339 [ C(RESULT_ACCESS) ] = -1,
1340 [ C(RESULT_MISS) ] = -1,
1342 [ C(OP_PREFETCH) ] = {
1343 [ C(RESULT_ACCESS) ] = 0,
1344 [ C(RESULT_MISS) ] = 0,
1349 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1350 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1353 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1354 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1356 [ C(OP_PREFETCH) ] = {
1357 [ C(RESULT_ACCESS) ] = 0,
1358 [ C(RESULT_MISS) ] = 0,
1363 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
1364 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
1367 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
1368 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
1370 [ C(OP_PREFETCH) ] = {
1371 [ C(RESULT_ACCESS) ] = 0,
1372 [ C(RESULT_MISS) ] = 0,
1377 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1378 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1381 [ C(RESULT_ACCESS) ] = -1,
1382 [ C(RESULT_MISS) ] = -1,
1384 [ C(OP_PREFETCH) ] = {
1385 [ C(RESULT_ACCESS) ] = -1,
1386 [ C(RESULT_MISS) ] = -1,
1391 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1392 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1395 [ C(RESULT_ACCESS) ] = -1,
1396 [ C(RESULT_MISS) ] = -1,
1398 [ C(OP_PREFETCH) ] = {
1399 [ C(RESULT_ACCESS) ] = -1,
1400 [ C(RESULT_MISS) ] = -1,
1405 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1406 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1407 /* no_alloc_cycles.not_delivered */
1408 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1409 "event=0xca,umask=0x50");
1410 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1411 /* uops_retired.all */
1412 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1413 "event=0xc2,umask=0x10");
1414 /* uops_retired.all */
1415 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1416 "event=0xc2,umask=0x10");
1418 static struct attribute *slm_events_attrs[] = {
1419 EVENT_PTR(td_total_slots_slm),
1420 EVENT_PTR(td_total_slots_scale_slm),
1421 EVENT_PTR(td_fetch_bubbles_slm),
1422 EVENT_PTR(td_fetch_bubbles_scale_slm),
1423 EVENT_PTR(td_slots_issued_slm),
1424 EVENT_PTR(td_slots_retired_slm),
1428 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1430 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1431 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1432 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1436 #define SLM_DMND_READ SNB_DMND_DATA_RD
1437 #define SLM_DMND_WRITE SNB_DMND_RFO
1438 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1440 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1441 #define SLM_LLC_ACCESS SNB_RESP_ANY
1442 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
1444 static __initconst const u64 slm_hw_cache_extra_regs
1445 [PERF_COUNT_HW_CACHE_MAX]
1446 [PERF_COUNT_HW_CACHE_OP_MAX]
1447 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1451 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1452 [ C(RESULT_MISS) ] = 0,
1455 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1456 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1458 [ C(OP_PREFETCH) ] = {
1459 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1460 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1465 static __initconst const u64 slm_hw_cache_event_ids
1466 [PERF_COUNT_HW_CACHE_MAX]
1467 [PERF_COUNT_HW_CACHE_OP_MAX]
1468 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1472 [ C(RESULT_ACCESS) ] = 0,
1473 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
1476 [ C(RESULT_ACCESS) ] = 0,
1477 [ C(RESULT_MISS) ] = 0,
1479 [ C(OP_PREFETCH) ] = {
1480 [ C(RESULT_ACCESS) ] = 0,
1481 [ C(RESULT_MISS) ] = 0,
1486 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1487 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
1490 [ C(RESULT_ACCESS) ] = -1,
1491 [ C(RESULT_MISS) ] = -1,
1493 [ C(OP_PREFETCH) ] = {
1494 [ C(RESULT_ACCESS) ] = 0,
1495 [ C(RESULT_MISS) ] = 0,
1500 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1501 [ C(RESULT_ACCESS) ] = 0x01b7,
1502 [ C(RESULT_MISS) ] = 0,
1505 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1506 [ C(RESULT_ACCESS) ] = 0x01b7,
1507 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1508 [ C(RESULT_MISS) ] = 0x01b7,
1510 [ C(OP_PREFETCH) ] = {
1511 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1512 [ C(RESULT_ACCESS) ] = 0x01b7,
1513 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1514 [ C(RESULT_MISS) ] = 0x01b7,
1519 [ C(RESULT_ACCESS) ] = 0,
1520 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
1523 [ C(RESULT_ACCESS) ] = 0,
1524 [ C(RESULT_MISS) ] = 0,
1526 [ C(OP_PREFETCH) ] = {
1527 [ C(RESULT_ACCESS) ] = 0,
1528 [ C(RESULT_MISS) ] = 0,
1533 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1534 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1537 [ C(RESULT_ACCESS) ] = -1,
1538 [ C(RESULT_MISS) ] = -1,
1540 [ C(OP_PREFETCH) ] = {
1541 [ C(RESULT_ACCESS) ] = -1,
1542 [ C(RESULT_MISS) ] = -1,
1547 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1548 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1551 [ C(RESULT_ACCESS) ] = -1,
1552 [ C(RESULT_MISS) ] = -1,
1554 [ C(OP_PREFETCH) ] = {
1555 [ C(RESULT_ACCESS) ] = -1,
1556 [ C(RESULT_MISS) ] = -1,
1561 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1562 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1563 /* UOPS_NOT_DELIVERED.ANY */
1564 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1565 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1566 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1567 /* UOPS_RETIRED.ANY */
1568 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1569 /* UOPS_ISSUED.ANY */
1570 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1572 static struct attribute *glm_events_attrs[] = {
1573 EVENT_PTR(td_total_slots_glm),
1574 EVENT_PTR(td_total_slots_scale_glm),
1575 EVENT_PTR(td_fetch_bubbles_glm),
1576 EVENT_PTR(td_recovery_bubbles_glm),
1577 EVENT_PTR(td_slots_issued_glm),
1578 EVENT_PTR(td_slots_retired_glm),
1582 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1583 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1584 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1585 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1589 #define GLM_DEMAND_DATA_RD BIT_ULL(0)
1590 #define GLM_DEMAND_RFO BIT_ULL(1)
1591 #define GLM_ANY_RESPONSE BIT_ULL(16)
1592 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33)
1593 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD
1594 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO
1595 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1596 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE
1597 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1598 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM)
1600 static __initconst const u64 glm_hw_cache_event_ids
1601 [PERF_COUNT_HW_CACHE_MAX]
1602 [PERF_COUNT_HW_CACHE_OP_MAX]
1603 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1606 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1607 [C(RESULT_MISS)] = 0x0,
1610 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1611 [C(RESULT_MISS)] = 0x0,
1613 [C(OP_PREFETCH)] = {
1614 [C(RESULT_ACCESS)] = 0x0,
1615 [C(RESULT_MISS)] = 0x0,
1620 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1621 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1624 [C(RESULT_ACCESS)] = -1,
1625 [C(RESULT_MISS)] = -1,
1627 [C(OP_PREFETCH)] = {
1628 [C(RESULT_ACCESS)] = 0x0,
1629 [C(RESULT_MISS)] = 0x0,
1634 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1635 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1638 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1639 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1641 [C(OP_PREFETCH)] = {
1642 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1643 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1648 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1649 [C(RESULT_MISS)] = 0x0,
1652 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1653 [C(RESULT_MISS)] = 0x0,
1655 [C(OP_PREFETCH)] = {
1656 [C(RESULT_ACCESS)] = 0x0,
1657 [C(RESULT_MISS)] = 0x0,
1662 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1663 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1666 [C(RESULT_ACCESS)] = -1,
1667 [C(RESULT_MISS)] = -1,
1669 [C(OP_PREFETCH)] = {
1670 [C(RESULT_ACCESS)] = -1,
1671 [C(RESULT_MISS)] = -1,
1676 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1677 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1680 [C(RESULT_ACCESS)] = -1,
1681 [C(RESULT_MISS)] = -1,
1683 [C(OP_PREFETCH)] = {
1684 [C(RESULT_ACCESS)] = -1,
1685 [C(RESULT_MISS)] = -1,
1690 static __initconst const u64 glm_hw_cache_extra_regs
1691 [PERF_COUNT_HW_CACHE_MAX]
1692 [PERF_COUNT_HW_CACHE_OP_MAX]
1693 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1696 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
1697 GLM_LLC_ACCESS,
1698 [C(RESULT_MISS)] = GLM_DEMAND_READ|
1699 GLM_LLC_MISS,
1702 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
1703 GLM_LLC_ACCESS,
1704 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
1705 GLM_LLC_MISS,
1707 [C(OP_PREFETCH)] = {
1708 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|
1709 GLM_LLC_ACCESS,
1710 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|
1711 GLM_LLC_MISS,
1716 static __initconst const u64 glp_hw_cache_event_ids
1717 [PERF_COUNT_HW_CACHE_MAX]
1718 [PERF_COUNT_HW_CACHE_OP_MAX]
1719 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1722 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1723 [C(RESULT_MISS)] = 0x0,
1726 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1727 [C(RESULT_MISS)] = 0x0,
1729 [C(OP_PREFETCH)] = {
1730 [C(RESULT_ACCESS)] = 0x0,
1731 [C(RESULT_MISS)] = 0x0,
1736 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1737 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1740 [C(RESULT_ACCESS)] = -1,
1741 [C(RESULT_MISS)] = -1,
1743 [C(OP_PREFETCH)] = {
1744 [C(RESULT_ACCESS)] = 0x0,
1745 [C(RESULT_MISS)] = 0x0,
1750 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1751 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1754 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1755 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1757 [C(OP_PREFETCH)] = {
1758 [C(RESULT_ACCESS)] = 0x0,
1759 [C(RESULT_MISS)] = 0x0,
1764 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1765 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
1768 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1769 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
1771 [C(OP_PREFETCH)] = {
1772 [C(RESULT_ACCESS)] = 0x0,
1773 [C(RESULT_MISS)] = 0x0,
1778 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1779 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1782 [C(RESULT_ACCESS)] = -1,
1783 [C(RESULT_MISS)] = -1,
1785 [C(OP_PREFETCH)] = {
1786 [C(RESULT_ACCESS)] = -1,
1787 [C(RESULT_MISS)] = -1,
1792 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1793 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1796 [C(RESULT_ACCESS)] = -1,
1797 [C(RESULT_MISS)] = -1,
1799 [C(OP_PREFETCH)] = {
1800 [C(RESULT_ACCESS)] = -1,
1801 [C(RESULT_MISS)] = -1,
1806 static __initconst const u64 glp_hw_cache_extra_regs
1807 [PERF_COUNT_HW_CACHE_MAX]
1808 [PERF_COUNT_HW_CACHE_OP_MAX]
1809 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1812 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
1813 GLM_LLC_ACCESS,
1814 [C(RESULT_MISS)] = GLM_DEMAND_READ|
1815 GLM_LLC_MISS,
1818 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
1819 GLM_LLC_ACCESS,
1820 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
1821 GLM_LLC_MISS,
1823 [C(OP_PREFETCH)] = {
1824 [C(RESULT_ACCESS)] = 0x0,
1825 [C(RESULT_MISS)] = 0x0,
1830 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit, E-state */
1831 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit, F-state */
1832 #define KNL_MCDRAM_LOCAL BIT_ULL(21)
1833 #define KNL_MCDRAM_FAR BIT_ULL(22)
1834 #define KNL_DDR_LOCAL BIT_ULL(23)
1835 #define KNL_DDR_FAR BIT_ULL(24)
1836 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
1837 KNL_DDR_LOCAL | KNL_DDR_FAR)
1838 #define KNL_L2_READ SLM_DMND_READ
1839 #define KNL_L2_WRITE SLM_DMND_WRITE
1840 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH
1841 #define KNL_L2_ACCESS SLM_LLC_ACCESS
1842 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
1843 KNL_DRAM_ANY | SNB_SNP_ANY | \
1844 SNB_NON_DRAM)
1846 static __initconst const u64 knl_hw_cache_extra_regs
1847 [PERF_COUNT_HW_CACHE_MAX]
1848 [PERF_COUNT_HW_CACHE_OP_MAX]
1849 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1852 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
1853 [C(RESULT_MISS)] = 0,
1856 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
1857 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS,
1859 [C(OP_PREFETCH)] = {
1860 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
1861 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS,
1867 * Used from PMIs where the LBRs are already disabled.
1869 * This function may be called consecutively; the PMU must remain in the
1870 * disabled state across consecutive calls.
1872 * During consecutive calls, the same disable value will be written to related
1873 * registers, so the PMU state remains unchanged.
1875 * intel_bts events don't coexist with intel PMU's BTS events because of
1876 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
1877 * disabled around intel PMU's event batching etc, only inside the PMI handler.
1879 static void __intel_pmu_disable_all(void)
1881 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1883 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1885 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1886 intel_pmu_disable_bts();
1888 intel_pmu_pebs_disable_all();
1891 static void intel_pmu_disable_all(void)
1893 __intel_pmu_disable_all();
1894 intel_pmu_lbr_disable_all();
1897 static void __intel_pmu_enable_all(int added, bool pmi)
1899 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1901 intel_pmu_pebs_enable_all();
1902 intel_pmu_lbr_enable_all(pmi);
1903 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
1904 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
1906 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1907 struct perf_event *event =
1908 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
1910 if (WARN_ON_ONCE(!event))
1911 return;
1913 intel_pmu_enable_bts(event->hw.config);
1917 static void intel_pmu_enable_all(int added)
1919 __intel_pmu_enable_all(added, false);
1924 * Intel Errata AAK100 (model 26)
1925 * Intel Errata AAP53 (model 30)
1926 * Intel Errata BD53 (model 44)
1928 * The official story:
1929 * These chips need to be 'reset' when adding counters by programming the
1930 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
1931 * in sequence on the same PMC or on different PMCs.
1933 * In practice it appears some of these events do in fact count, and
1934 * we need to program all 4 events.
1936 static void intel_pmu_nhm_workaround(void)
1938 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1939 static const unsigned long nhm_magic[4] = {
1945 struct perf_event *event;
1949 * The errata requires the following steps:
1950 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
1951 * 2) Configure 4 PERFEVTSELx with the magic events and clear
1952 * the corresponding PMCx;
1953 * 3) Set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
1954 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
1955 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
1959 * The steps we actually take differ slightly from the above:
1960 * A) To reduce MSR operations, we skip step 1) because those registers
1961 * are already cleared before this function is called;
1962 * B) Call x86_perf_event_update to save PMCx before configuring
1963 * PERFEVTSELx with the magic number;
1964 * C) For step 5), we clear a PERFEVTSELx only if it is not currently
1965 * in use;
1966 * D) Call x86_perf_event_set_period to restore PMCx.
1969 /* We always operate on 4 pairs of PERF counters */
1970 for (i = 0; i < 4; i++) {
1971 event = cpuc->events[i];
1972 if (event)
1973 x86_perf_event_update(event);
1976 for (i = 0; i < 4; i++) {
1977 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
1978 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
1981 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
1982 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
1984 for (i = 0; i < 4; i++) {
1985 event = cpuc->events[i];
1987 if (event) {
1988 x86_perf_event_set_period(event);
1989 __x86_pmu_enable_event(&event->hw,
1990 ARCH_PERFMON_EVENTSEL_ENABLE);
1991 } else
1992 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
1996 static void intel_pmu_nhm_enable_all(int added)
1998 if (added)
1999 intel_pmu_nhm_workaround();
2000 intel_pmu_enable_all(added);
2003 static void enable_counter_freeze(void)
2005 update_debugctlmsr(get_debugctlmsr() |
2006 DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
2009 static void disable_counter_freeze(void)
2011 update_debugctlmsr(get_debugctlmsr() &
2012 ~DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
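/*
 * Usage note (simplified): with DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI set,
 * the hardware freezes the counters itself when a PMI is raised, so a
 * handler built on this mode can skip the explicit
 * __intel_pmu_disable_all()/__intel_pmu_enable_all() bracketing that
 * the legacy handler needs.
 */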
2015 static inline u64 intel_pmu_get_status(void)
2017 u64 status;
2019 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2021 return status;
2024 static inline void intel_pmu_ack_status(u64 ack)
2026 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2029 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
2031 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2032 u64 ctrl_val, mask;
2034 mask = 0xfULL << (idx * 4);
2036 rdmsrl(hwc->config_base, ctrl_val);
2037 ctrl_val &= ~mask;
2038 wrmsrl(hwc->config_base, ctrl_val);
2041 static inline bool event_is_checkpointed(struct perf_event *event)
2043 return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2046 static void intel_pmu_disable_event(struct perf_event *event)
2048 struct hw_perf_event *hwc = &event->hw;
2049 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2051 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2052 intel_pmu_disable_bts();
2053 intel_pmu_drain_bts_buffer();
2054 return;
2055 }
2057 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
2058 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
2059 cpuc->intel_cp_status &= ~(1ull << hwc->idx);
2061 if (unlikely(event->attr.precise_ip))
2062 intel_pmu_pebs_disable(event);
2064 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
2065 intel_pmu_disable_fixed(hwc);
2066 return;
2067 }
2069 x86_pmu_disable_event(event);
2072 static void intel_pmu_del_event(struct perf_event *event)
2074 if (needs_branch_stack(event))
2075 intel_pmu_lbr_del(event);
2076 if (event->attr.precise_ip)
2077 intel_pmu_pebs_del(event);
2080 static void intel_pmu_read_event(struct perf_event *event)
2082 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2083 intel_pmu_auto_reload_read(event);
2084 else
2085 x86_perf_event_update(event);
2088 static void intel_pmu_enable_fixed(struct perf_event *event)
2090 struct hw_perf_event *hwc = &event->hw;
2091 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2092 u64 ctrl_val, mask, bits = 0;
2095 * Enable IRQ generation (0x8), if not PEBS,
2096 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2099 if (!event->attr.precise_ip)
2101 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2103 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2107 * ANY bit is supported in v3 and up
2109 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2113 mask = 0xfULL << (idx * 4);
2115 rdmsrl(hwc->config_base, ctrl_val);
2118 wrmsrl(hwc->config_base, ctrl_val);
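/*
 * Illustrative example (not from the source): for fixed counter 0 counting
 * user+kernel with PMI enabled, bits = 0x8 | 0x2 | 0x1 = 0xb and mask = 0xf,
 * so the update amounts to:
 *
 *	ctrl_val = (ctrl_val & ~0xfULL) | 0xbULL;
 *
 * For idx = 1 both values shift left by 4, touching only bits 4-7 of
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL.
 */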
2121 static void intel_pmu_enable_event(struct perf_event *event)
2123 struct hw_perf_event *hwc = &event->hw;
2124 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2126 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2127 if (!__this_cpu_read(cpu_hw_events.enabled))
2130 intel_pmu_enable_bts(hwc->config);
2134 if (event->attr.exclude_host)
2135 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
2136 if (event->attr.exclude_guest)
2137 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
2139 if (unlikely(event_is_checkpointed(event)))
2140 cpuc->intel_cp_status |= (1ull << hwc->idx);
2142 if (unlikely(event->attr.precise_ip))
2143 intel_pmu_pebs_enable(event);
2145 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
2146 intel_pmu_enable_fixed(event);
2150 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2153 static void intel_pmu_add_event(struct perf_event *event)
2155 if (event->attr.precise_ip)
2156 intel_pmu_pebs_add(event);
2157 if (needs_branch_stack(event))
2158 intel_pmu_lbr_add(event);
2162 * Save and restart an expired event. Called from NMI context,
2163 * so it has to be careful about preempting normal event ops:
2165 int intel_pmu_save_and_restart(struct perf_event *event)
2167 x86_perf_event_update(event);
2169 * For a checkpointed counter always reset back to 0. This
2170 * avoids a situation where the counter overflows, aborts the
2171 * transaction and is then set back to shortly before the
2172 * overflow, and overflows and aborts again.
2174 if (unlikely(event_is_checkpointed(event))) {
2175 /* No race with NMIs because the counter should not be armed */
2176 wrmsrl(event->hw.event_base, 0);
2177 local64_set(&event->hw.prev_count, 0);
2179 return x86_perf_event_set_period(event);
2182 static void intel_pmu_reset(void)
2184 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2185 unsigned long flags;
2188 if (!x86_pmu.num_counters)
2191 local_irq_save(flags);
2193 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2195 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2196 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2197 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
2199 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
2200 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2203 ds->bts_index = ds->bts_buffer_base;
2205 /* Ack all overflows and disable fixed counters */
2206 if (x86_pmu.version >= 2) {
2207 intel_pmu_ack_status(intel_pmu_get_status());
2208 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2211 /* Reset LBRs and LBR freezing */
2212 if (x86_pmu.lbr_nr) {
2213 update_debugctlmsr(get_debugctlmsr() &
2214 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2217 local_irq_restore(flags);
2220 static int handle_pmi_common(struct pt_regs *regs, u64 status)
2222 struct perf_sample_data data;
2223 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2227 inc_irq_stat(apic_perf_irqs);
2230 * Ignore a range of extra bits in status that do not indicate
2231 * overflow by themselves.
2233 status &= ~(GLOBAL_STATUS_COND_CHG |
2234 GLOBAL_STATUS_ASIF |
2235 GLOBAL_STATUS_LBRS_FROZEN);
2239 * In case multiple PEBS events are sampled at the same time,
2240 * it is possible to have GLOBAL_STATUS bit 62 set indicating
2241 * PEBS buffer overflow and also seeing at most 3 PEBS counters
2242 * having their bits set in the status register. This is a sign
2243 * that there was at least one PEBS record pending at the time
2244 * of the PMU interrupt. PEBS counters must only be processed
2245 * via the drain_pebs() calls and not via the regular sample
2246 * processing loop that follows in this function, otherwise
2247 * phony regular samples may be generated in the sampling buffer
2248 * not marked with the EXACT tag. Another possibility is to have
2249 * one PEBS event and at least one non-PEBS event which overflows
2250 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
2251 * not be set, yet the overflow status bit for the PEBS counter will
2254 * To avoid this problem, we systematically ignore the PEBS-enabled
2255 * counters from the GLOBAL_STATUS mask and we always process PEBS
2256 * events via drain_pebs().
2258 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
2259 status &= ~cpuc->pebs_enabled;
2261 status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
2264 * PEBS overflow sets bit 62 in the global status register
2266 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
2268 x86_pmu.drain_pebs(regs);
2269 status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
2275 if (__test_and_clear_bit(55, (unsigned long *)&status)) {
2277 intel_pt_interrupt();
2281 * Checkpointed counters can lead to 'spurious' PMIs because the
2282 * rollback caused by the PMI will have cleared the overflow status
2283 * bit. Therefore always force probe these counters.
2285 status |= cpuc->intel_cp_status;
2287 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2288 struct perf_event *event = cpuc->events[bit];
2292 if (!test_bit(bit, cpuc->active_mask))
2295 if (!intel_pmu_save_and_restart(event))
2298 perf_sample_data_init(&data, 0, event->hw.last_period);
2300 if (has_branch_stack(event))
2301 data.br_stack = &cpuc->lbr_stack;
2303 if (perf_event_overflow(event, &data, regs))
2304 x86_pmu_stop(event, 0);
2310 static bool disable_counter_freezing = true;
2311 static int __init intel_perf_counter_freezing_setup(char *s)
2315 if (kstrtobool(s, &res))
2318 disable_counter_freezing = !res;
2321 __setup("perf_v4_pmi=", intel_perf_counter_freezing_setup);
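/*
 * Usage sketch (illustrative): booting with "perf_v4_pmi=0" sets
 * disable_counter_freezing, which keeps the legacy intel_pmu_handle_irq()
 * NMI handler even on Arch Perfmon v4 parts; kstrtobool() also accepts
 * "n" or "off".
 */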
2324 * Simplified handler for Arch Perfmon v4:
2325 * - We rely on counter freezing/unfreezing to enable/disable the PMU.
2326 * This is done automatically on PMU ack.
2327 * - Ack the PMU only after the APIC.
2330 static int intel_pmu_handle_irq_v4(struct pt_regs *regs)
2332 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2336 int pmu_enabled = cpuc->enabled;
2339 /* PMU has been disabled because of counter freezing */
2341 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2343 intel_bts_disable_local();
2344 handled = intel_pmu_drain_bts_buffer();
2345 handled += intel_bts_interrupt();
2347 status = intel_pmu_get_status();
2351 intel_pmu_lbr_read();
2352 if (++loops > 100) {
2356 WARN(1, "perfevents: irq loop stuck!\n");
2357 perf_event_print_debug();
2365 handled += handle_pmi_common(regs, status);
2367 /* Ack the PMI in the APIC */
2368 apic_write(APIC_LVTPC, APIC_DM_NMI);
2371 * The counters start counting immediately once the status is acked.
2372 * Make it as close as possible to IRET. This avoids bogus
2373 * freezing on Skylake CPUs.
2376 intel_pmu_ack_status(status);
2379 * The CPU may issue two PMIs very close to each other.
2380 * When the PMI handler services the first one, the
2381 * GLOBAL_STATUS is already updated to reflect both.
2382 * When it IRETs, the second PMI is immediately
2383 * handled and sees a clear status. In the meantime,
2384 * there may be a third PMI, because the freezing bit
2385 * was not set by the ack in the first PMI handler.
2386 * Double check if there is more work to be done.
2388 status = intel_pmu_get_status();
2394 intel_bts_enable_local();
2395 cpuc->enabled = pmu_enabled;
2400 * This handler is triggered by the local APIC, so the APIC IRQ handling
2403 static int intel_pmu_handle_irq(struct pt_regs *regs)
2405 struct cpu_hw_events *cpuc;
2411 cpuc = this_cpu_ptr(&cpu_hw_events);
2414 * Save the PMU state.
2415 * It needs to be restored when leaving the handler.
2417 pmu_enabled = cpuc->enabled;
2419 * There is no known reason not to always do the late ACK,
2420 * but just in case, make it opt-in.
2422 if (!x86_pmu.late_ack)
2423 apic_write(APIC_LVTPC, APIC_DM_NMI);
2424 intel_bts_disable_local();
2426 __intel_pmu_disable_all();
2427 handled = intel_pmu_drain_bts_buffer();
2428 handled += intel_bts_interrupt();
2429 status = intel_pmu_get_status();
2435 intel_pmu_lbr_read();
2436 intel_pmu_ack_status(status);
2437 if (++loops > 100) {
2441 WARN(1, "perfevents: irq loop stuck!\n");
2442 perf_event_print_debug();
2449 handled += handle_pmi_common(regs, status);
2452 * Repeat if there is more work to be done:
2454 status = intel_pmu_get_status();
2459 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2460 cpuc->enabled = pmu_enabled;
2462 __intel_pmu_enable_all(0, true);
2463 intel_bts_enable_local();
2466 * Only unmask the NMI after the overflow counters
2467 * have been reset. This avoids spurious NMIs on Haswell CPUs.
2470 if (x86_pmu.late_ack)
2471 apic_write(APIC_LVTPC, APIC_DM_NMI);
2475 static struct event_constraint *
2476 intel_bts_constraints(struct perf_event *event)
2478 if (unlikely(intel_pmu_has_bts(event)))
2479 return &bts_constraint;
2484 static int intel_alt_er(int idx, u64 config)
2488 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
2491 if (idx == EXTRA_REG_RSP_0)
2492 alt_idx = EXTRA_REG_RSP_1;
2494 if (idx == EXTRA_REG_RSP_1)
2495 alt_idx = EXTRA_REG_RSP_0;
2497 if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
2503 static void intel_fixup_er(struct perf_event *event, int idx)
2505 event->hw.extra_reg.idx = idx;
2507 if (idx == EXTRA_REG_RSP_0) {
2508 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2509 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
2510 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
2511 } else if (idx == EXTRA_REG_RSP_1) {
2512 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2513 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
2514 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
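/*
 * Worked example (illustrative, Westmere/Sandy Bridge encodings): if an
 * OFFCORE_RESPONSE_0 event (0x01b7) loses the race for MSR_OFFCORE_RSP_0,
 * intel_fixup_er() rewrites its event code to the OFFCORE_RESPONSE_1
 * encoding (0x01bb) and repoints extra_reg.reg at MSR_OFFCORE_RSP_1, so the
 * unchanged config1 value can be programmed into the alternate MSR.
 */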
2519 * manage allocation of the shared extra MSRs for certain events
2522 * per-cpu: to be shared between the various events on a single PMU
2523 * per-core: per-cpu + shared by HT threads
2525 static struct event_constraint *
2526 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
2527 struct perf_event *event,
2528 struct hw_perf_event_extra *reg)
2530 struct event_constraint *c = &emptyconstraint;
2531 struct er_account *era;
2532 unsigned long flags;
2536 * reg->alloc can be set due to existing state, so for fake cpuc we
2537 * need to ignore this, otherwise we might fail to allocate proper fake
2538 * state for this extra reg constraint. Also see the comment below.
2540 if (reg->alloc && !cpuc->is_fake)
2541 return NULL; /* call x86_get_event_constraint() */
2544 era = &cpuc->shared_regs->regs[idx];
2546 * we use spin_lock_irqsave() to avoid lockdep issues when
2547 * passing a fake cpuc
2549 raw_spin_lock_irqsave(&era->lock, flags);
2551 if (!atomic_read(&era->ref) || era->config == reg->config) {
2554 * If its a fake cpuc -- as per validate_{group,event}() we
2555 * shouldn't touch event state and we can avoid doing so
2556 * since both will only call get_event_constraints() once
2557 * on each event, this avoids the need for reg->alloc.
2559 * Not doing the ER fixup will only result in era->reg being
2560 * wrong, but since we won't actually try and program hardware
2561 * this isn't a problem either.
2563 if (!cpuc->is_fake) {
2564 if (idx != reg->idx)
2565 intel_fixup_er(event, idx);
2568 * x86_schedule_events() can call get_event_constraints()
2569 * multiple times on events in the case of incremental
2570 * scheduling. reg->alloc ensures we only do the ER
2576 /* lock in msr value */
2577 era->config = reg->config;
2578 era->reg = reg->reg;
2581 atomic_inc(&era->ref);
2584 * need to call x86_get_event_constraint()
2585 * to check if associated event has constraints
2589 idx = intel_alt_er(idx, reg->config);
2590 if (idx != reg->idx) {
2591 raw_spin_unlock_irqrestore(&era->lock, flags);
2595 raw_spin_unlock_irqrestore(&era->lock, flags);
2601 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
2602 struct hw_perf_event_extra *reg)
2604 struct er_account *era;
2607 * Only put constraint if extra reg was actually allocated. Also takes
2608 * care of events which do not use an extra shared reg.
2610 * Also, if this is a fake cpuc we shouldn't touch any event state
2611 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
2612 * either since it'll be thrown out.
2614 if (!reg->alloc || cpuc->is_fake)
2617 era = &cpuc->shared_regs->regs[reg->idx];
2619 /* one fewer user */
2620 atomic_dec(&era->ref);
2622 /* allocate again next time */
2626 static struct event_constraint *
2627 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
2628 struct perf_event *event)
2630 struct event_constraint *c = NULL, *d;
2631 struct hw_perf_event_extra *xreg, *breg;
2633 xreg = &event->hw.extra_reg;
2634 if (xreg->idx != EXTRA_REG_NONE) {
2635 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
2636 if (c == &emptyconstraint)
2639 breg = &event->hw.branch_reg;
2640 if (breg->idx != EXTRA_REG_NONE) {
2641 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
2642 if (d == &emptyconstraint) {
2643 __intel_shared_reg_put_constraints(cpuc, xreg);
2650 struct event_constraint *
2651 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2652 struct perf_event *event)
2654 struct event_constraint *c;
2656 if (x86_pmu.event_constraints) {
2657 for_each_event_constraint(c, x86_pmu.event_constraints) {
2658 if ((event->hw.config & c->cmask) == c->code) {
2659 event->hw.flags |= c->flags;
2665 return &unconstrained;
2668 static struct event_constraint *
2669 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2670 struct perf_event *event)
2672 struct event_constraint *c;
2674 c = intel_bts_constraints(event);
2678 c = intel_shared_regs_constraints(cpuc, event);
2682 c = intel_pebs_constraints(event);
2686 return x86_get_event_constraints(cpuc, idx, event);
2690 intel_start_scheduling(struct cpu_hw_events *cpuc)
2692 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2693 struct intel_excl_states *xl;
2694 int tid = cpuc->excl_thread_id;
2697 * nothing needed if in group validation mode
2699 if (cpuc->is_fake || !is_ht_workaround_enabled())
2703 * no exclusion needed
2705 if (WARN_ON_ONCE(!excl_cntrs))
2708 xl = &excl_cntrs->states[tid];
2710 xl->sched_started = true;
2712 * lock shared state until we are done scheduling
2713 * in stop_event_scheduling()
2714 * makes scheduling appear as a transaction
2716 raw_spin_lock(&excl_cntrs->lock);
2719 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2721 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2722 struct event_constraint *c = cpuc->event_constraint[idx];
2723 struct intel_excl_states *xl;
2724 int tid = cpuc->excl_thread_id;
2726 if (cpuc->is_fake || !is_ht_workaround_enabled())
2729 if (WARN_ON_ONCE(!excl_cntrs))
2732 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
2735 xl = &excl_cntrs->states[tid];
2737 lockdep_assert_held(&excl_cntrs->lock);
2739 if (c->flags & PERF_X86_EVENT_EXCL)
2740 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
2742 xl->state[cntr] = INTEL_EXCL_SHARED;
2746 intel_stop_scheduling(struct cpu_hw_events *cpuc)
2748 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2749 struct intel_excl_states *xl;
2750 int tid = cpuc->excl_thread_id;
2753 * nothing needed if in group validation mode
2755 if (cpuc->is_fake || !is_ht_workaround_enabled())
2758 * no exclusion needed
2760 if (WARN_ON_ONCE(!excl_cntrs))
2763 xl = &excl_cntrs->states[tid];
2765 xl->sched_started = false;
2767 * release shared state lock (acquired in intel_start_scheduling())
2769 raw_spin_unlock(&excl_cntrs->lock);
2772 static struct event_constraint *
2773 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
2774 int idx, struct event_constraint *c)
2776 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2777 struct intel_excl_states *xlo;
2778 int tid = cpuc->excl_thread_id;
2782 * validating a group does not require
2783 * enforcing cross-thread exclusion
2785 if (cpuc->is_fake || !is_ht_workaround_enabled())
2789 * no exclusion needed
2791 if (WARN_ON_ONCE(!excl_cntrs))
2795 * because we modify the constraint, we need
2796 * to make a copy. Static constraints come
2797 * from static const tables.
2799 * only needed when constraint has not yet
2800 * been cloned (marked dynamic)
2802 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
2803 struct event_constraint *cx;
2806 * grab pre-allocated constraint entry
2808 cx = &cpuc->constraint_list[idx];
2811 * initialize dynamic constraint
2812 * with static constraint
2817 * mark constraint as dynamic, so we
2818 * can free it later on
2820 cx->flags |= PERF_X86_EVENT_DYNAMIC;
2825 * From here on, the constraint is dynamic.
2826 * Either it was just allocated above, or it
2827 * was allocated during an earlier invocation
2832 * state of sibling HT
2834 xlo = &excl_cntrs->states[tid ^ 1];
2837 * event requires exclusive counter access
2840 is_excl = c->flags & PERF_X86_EVENT_EXCL;
2841 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
2842 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
2843 if (!cpuc->n_excl++)
2844 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
2848 * Modify static constraint with current dynamic
2851 * EXCLUSIVE: sibling counter measuring exclusive event
2852 * SHARED : sibling counter measuring non-exclusive event
2853 * UNUSED : sibling counter unused
2855 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
2857 * exclusive event in sibling counter
2858 * our corresponding counter cannot be used
2859 * regardless of our event
2861 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE)
2862 __clear_bit(i, c->idxmsk);
2864 * if measuring an exclusive event, sibling
2865 * measuring non-exclusive, then the counter cannot be used.
2868 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED)
2869 __clear_bit(i, c->idxmsk);
2873 * recompute actual bit weight for scheduling algorithm
2875 c->weight = hweight64(c->idxmsk64);
2878 * if we return an empty mask, then switch
2879 * back to static empty constraint to avoid
2880 * the cost of freeing later on
2883 c = &emptyconstraint;
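/*
 * Worked example (illustrative): with the HT workaround enabled, if the
 * sibling thread has an exclusive event on counter 1, xlo->state[1] is
 * INTEL_EXCL_EXCLUSIVE, so bit 1 is cleared from our dynamic copy of the
 * constraint and this thread cannot schedule anything on counter 1 until
 * the sibling releases it.
 */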
2888 static struct event_constraint *
2889 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2890 struct perf_event *event)
2892 struct event_constraint *c1 = NULL;
2893 struct event_constraint *c2;
2895 if (idx >= 0) /* fake does < 0 */
2896 c1 = cpuc->event_constraint[idx];
2900 * - static constraint: no change across incremental scheduling calls
2901 * - dynamic constraint: handled by intel_get_excl_constraints()
2903 c2 = __intel_get_event_constraints(cpuc, idx, event);
2904 if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) {
2905 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
2906 c1->weight = c2->weight;
2910 if (cpuc->excl_cntrs)
2911 return intel_get_excl_constraints(cpuc, event, idx, c2);
2916 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
2917 struct perf_event *event)
2919 struct hw_perf_event *hwc = &event->hw;
2920 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2921 int tid = cpuc->excl_thread_id;
2922 struct intel_excl_states *xl;
2925 * nothing needed if in group validation mode
2930 if (WARN_ON_ONCE(!excl_cntrs))
2933 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
2934 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
2935 if (!--cpuc->n_excl)
2936 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
2940 * If event was actually assigned, then mark the counter state as
2943 if (hwc->idx >= 0) {
2944 xl = &excl_cntrs->states[tid];
2947 * put_constraint may be called from x86_schedule_events()
2948 * which already has the lock held, so here make the locking conditional.
2951 if (!xl->sched_started)
2952 raw_spin_lock(&excl_cntrs->lock);
2954 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
2956 if (!xl->sched_started)
2957 raw_spin_unlock(&excl_cntrs->lock);
2962 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
2963 struct perf_event *event)
2965 struct hw_perf_event_extra *reg;
2967 reg = &event->hw.extra_reg;
2968 if (reg->idx != EXTRA_REG_NONE)
2969 __intel_shared_reg_put_constraints(cpuc, reg);
2971 reg = &event->hw.branch_reg;
2972 if (reg->idx != EXTRA_REG_NONE)
2973 __intel_shared_reg_put_constraints(cpuc, reg);
2976 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
2977 struct perf_event *event)
2979 intel_put_shared_regs_event_constraints(cpuc, event);
2982 * if the PMU has exclusive counter restrictions, then
2983 * all events are subject to them and must call the
2984 * put_excl_constraints() routine
2986 if (cpuc->excl_cntrs)
2987 intel_put_excl_constraints(cpuc, event);
2990 static void intel_pebs_aliases_core2(struct perf_event *event)
2992 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
2994 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
2995 * (0x003c) so that we can use it with PEBS.
2997 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
2998 * PEBS capable. However we can use INST_RETIRED.ANY_P
2999 * (0x00c0), which is a PEBS capable event, to get the same
3002 * INST_RETIRED.ANY_P counts the number of cycles that retire
3003 * at least CNTMASK instructions. By setting CNTMASK to a value (16)
3004 * larger than the maximum number of instructions that can be
3005 * retired per cycle (4) and then inverting the condition, we
3006 * count all cycles that retire 16 or fewer instructions, which is every cycle.
3009 * Thereby we gain a PEBS capable cycle counter.
3011 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3013 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3014 event->hw.config = alt_config;
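/*
 * For reference (illustrative): X86_CONFIG(.event=0xc0, .inv=1, .cmask=16)
 * expands to 0x108000c0 -- event code 0xc0 in bits 0-7, INV at bit 23
 * (0x800000) and CMASK 16 in bits 24-31 (0x10000000).
 */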
3018 static void intel_pebs_aliases_snb(struct perf_event *event)
3020 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3022 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3023 * (0x003c) so that we can use it with PEBS.
3025 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3026 * PEBS capable. However we can use UOPS_RETIRED.ALL
3027 * (0x01c2), which is a PEBS capable event, to get the same
3030 * UOPS_RETIRED.ALL counts the number of cycles that retire
3031 * at least CNTMASK micro-ops. By setting CNTMASK to a value (16)
3032 * larger than the maximum number of micro-ops that can be
3033 * retired per cycle (4) and then inverting the condition, we
3034 * count all cycles that retire 16 or fewer micro-ops, which is every cycle.
3037 * Thereby we gain a PEBS capable cycle counter.
3039 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3041 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3042 event->hw.config = alt_config;
3046 static void intel_pebs_aliases_precdist(struct perf_event *event)
3048 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3050 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3051 * (0x003c) so that we can use it with PEBS.
3053 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3054 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3055 * (0x01c0), which is a PEBS capable event, to get the same
3058 * The PREC_DIST event has special support to minimize sample
3059 * shadowing effects. One drawback is that it can be
3060 * programmed only on counter 1, but that seems like an
3061 * acceptable trade off.
3063 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3065 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3066 event->hw.config = alt_config;
3070 static void intel_pebs_aliases_ivb(struct perf_event *event)
3072 if (event->attr.precise_ip < 3)
3073 return intel_pebs_aliases_snb(event);
3074 return intel_pebs_aliases_precdist(event);
3077 static void intel_pebs_aliases_skl(struct perf_event *event)
3079 if (event->attr.precise_ip < 3)
3080 return intel_pebs_aliases_core2(event);
3081 return intel_pebs_aliases_precdist(event);
3084 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3086 unsigned long flags = x86_pmu.large_pebs_flags;
3088 if (event->attr.use_clockid)
3089 flags &= ~PERF_SAMPLE_TIME;
3090 if (!event->attr.exclude_kernel)
3091 flags &= ~PERF_SAMPLE_REGS_USER;
3092 if (event->attr.sample_regs_user & ~PEBS_REGS)
3093 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
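/*
 * Usage sketch (illustrative): an event opened with attr.use_clockid set
 * (e.g. "perf record --clockid=CLOCK_MONOTONIC -e cycles:pp") loses
 * PERF_SAMPLE_TIME from the returned flags, so a sample_type that includes
 * TIME can no longer take the multi-record (large) PEBS path.
 */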
3097 static int intel_pmu_bts_config(struct perf_event *event)
3099 struct perf_event_attr *attr = &event->attr;
3101 if (unlikely(intel_pmu_has_bts(event))) {
3102 /* BTS is not supported by this architecture. */
3103 if (!x86_pmu.bts_active)
3106 /* BTS is currently only allowed for user-mode. */
3107 if (!attr->exclude_kernel)
3110 /* BTS is not allowed for precise events. */
3111 if (attr->precise_ip)
3114 /* disallow bts if conflicting events are present */
3115 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3118 event->destroy = hw_perf_lbr_event_destroy;
3124 static int core_pmu_hw_config(struct perf_event *event)
3126 int ret = x86_pmu_hw_config(event);
3131 return intel_pmu_bts_config(event);
3134 static int intel_pmu_hw_config(struct perf_event *event)
3136 int ret = x86_pmu_hw_config(event);
3141 ret = intel_pmu_bts_config(event);
3145 if (event->attr.precise_ip) {
3146 if (!event->attr.freq) {
3147 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3148 if (!(event->attr.sample_type &
3149 ~intel_pmu_large_pebs_flags(event)))
3150 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3152 if (x86_pmu.pebs_aliases)
3153 x86_pmu.pebs_aliases(event);
3155 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3156 event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3159 if (needs_branch_stack(event)) {
3160 ret = intel_pmu_setup_lbr_filter(event);
3165 * BTS is set up earlier in this path, so don't account twice
3167 if (!unlikely(intel_pmu_has_bts(event))) {
3168 /* disallow lbr if conflicting events are present */
3169 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3172 event->destroy = hw_perf_lbr_event_destroy;
3176 if (event->attr.type != PERF_TYPE_RAW)
3179 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
3182 if (x86_pmu.version < 3)
3185 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3188 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
3193 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
3195 if (x86_pmu.guest_get_msrs)
3196 return x86_pmu.guest_get_msrs(nr);
3200 EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
3202 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
3204 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3205 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3207 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
3208 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
3209 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
3210 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
3211 arr[0].guest &= ~cpuc->pebs_enabled;
3213 arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
3216 if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
3218 * If a PMU counter has PEBS enabled, it is not enough to
3219 * disable the counter on a guest entry, since a PEBS memory
3220 * write can overshoot guest entry and corrupt guest
3221 * memory. Disabling PEBS solves the problem.
3223 * Don't do this if the CPU already enforces it.
3225 arr[1].msr = MSR_IA32_PEBS_ENABLE;
3226 arr[1].host = cpuc->pebs_enabled;
3234 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
3236 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3237 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3240 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3241 struct perf_event *event = cpuc->events[idx];
3243 arr[idx].msr = x86_pmu_config_addr(idx);
3244 arr[idx].host = arr[idx].guest = 0;
3246 if (!test_bit(idx, cpuc->active_mask))
3249 arr[idx].host = arr[idx].guest =
3250 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
3252 if (event->attr.exclude_host)
3253 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3254 else if (event->attr.exclude_guest)
3255 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3258 *nr = x86_pmu.num_counters;
3262 static void core_pmu_enable_event(struct perf_event *event)
3264 if (!event->attr.exclude_host)
3265 x86_pmu_enable_event(event);
3268 static void core_pmu_enable_all(int added)
3270 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3273 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3274 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
3276 if (!test_bit(idx, cpuc->active_mask) ||
3277 cpuc->events[idx]->attr.exclude_host)
3280 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
3284 static int hsw_hw_config(struct perf_event *event)
3286 int ret = intel_pmu_hw_config(event);
3290 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
3292 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
3295 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
3296 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
3299 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
3300 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
3301 event->attr.precise_ip > 0))
3304 if (event_is_checkpointed(event)) {
3306 * Sampling of checkpointed events can cause situations where
3307 * the CPU constantly aborts because of an overflow, which is
3308 * then checkpointed back and ignored. Forbid checkpointing
3311 * But still allow a long sampling period, so that perf stat
3314 if (event->attr.sample_period > 0 &&
3315 event->attr.sample_period < 0x7fffffff)
3321 static struct event_constraint counter0_constraint =
3322 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
3324 static struct event_constraint counter2_constraint =
3325 EVENT_CONSTRAINT(0, 0x4, 0);
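/*
 * Note (illustrative): EVENT_CONSTRAINT(0, 0x4, 0) has a counter mask of
 * 0x4, i.e. PMC2 only, matching the Haswell requirement that
 * HSW_IN_TX_CHECKPOINTED events run in counter 2; counter0_constraint
 * (mask 0x1) likewise pins Goldmont Plus :ppp events to PMC0.
 */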
3327 static struct event_constraint *
3328 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3329 struct perf_event *event)
3331 struct event_constraint *c;
3333 c = intel_get_event_constraints(cpuc, idx, event);
3335 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
3336 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
3337 if (c->idxmsk64 & (1U << 2))
3338 return &counter2_constraint;
3339 return &emptyconstraint;
3345 static struct event_constraint *
3346 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3347 struct perf_event *event)
3349 struct event_constraint *c;
3351 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
3352 if (event->attr.precise_ip == 3)
3353 return &counter0_constraint;
3355 c = intel_get_event_constraints(cpuc, idx, event);
3363 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
3364 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
3365 * the two to enforce a minimum period of 128 (the smallest value that has bits
3366 * 0-5 cleared and >= 100).
3368 * Because of how the code in x86_perf_event_set_period() works, the truncation
3369 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
3370 * to make up for the 'lost' events due to carrying the 'error' in period_left.
3372 * Therefore the effective (average) period matches the requested period,
3373 * despite coarser hardware granularity.
3375 static u64 bdw_limit_period(struct perf_event *event, u64 left)
3377 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
3378 X86_CONFIG(.event=0xc0, .umask=0x01)) {
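/*
 * Sketch of the body implied by the comment above (an assumption from that
 * comment, not copied from the source): clamp to a minimum of 128, then
 * clear bits 0-5.
 */
if (left < 128)
left = 128;
left &= ~0x3fULL;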
3386 PMU_FORMAT_ATTR(event, "config:0-7" );
3387 PMU_FORMAT_ATTR(umask, "config:8-15" );
3388 PMU_FORMAT_ATTR(edge, "config:18" );
3389 PMU_FORMAT_ATTR(pc, "config:19" );
3390 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
3391 PMU_FORMAT_ATTR(inv, "config:23" );
3392 PMU_FORMAT_ATTR(cmask, "config:24-31" );
3393 PMU_FORMAT_ATTR(in_tx, "config:32");
3394 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
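/*
 * Usage sketch (illustrative): these attributes appear under
 * /sys/bus/event_source/devices/cpu/format/, letting the perf tool turn
 * e.g. "cpu/event=0x3c,in_tx=1,in_tx_cp=1/" into config bits 0-7 = 0x3c
 * plus bits 32 and 33 set.
 */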
3396 static struct attribute *intel_arch_formats_attr[] = {
3397 &format_attr_event.attr,
3398 &format_attr_umask.attr,
3399 &format_attr_edge.attr,
3400 &format_attr_pc.attr,
3401 &format_attr_inv.attr,
3402 &format_attr_cmask.attr,
3406 ssize_t intel_event_sysfs_show(char *page, u64 config)
3408 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
3410 return x86_event_sysfs_show(page, config, event);
3413 struct intel_shared_regs *allocate_shared_regs(int cpu)
3415 struct intel_shared_regs *regs;
3418 regs = kzalloc_node(sizeof(struct intel_shared_regs),
3419 GFP_KERNEL, cpu_to_node(cpu));
3422 * initialize the locks to keep lockdep happy
3424 for (i = 0; i < EXTRA_REG_MAX; i++)
3425 raw_spin_lock_init(&regs->regs[i].lock);
3432 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
3434 struct intel_excl_cntrs *c;
3436 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
3437 GFP_KERNEL, cpu_to_node(cpu));
3439 raw_spin_lock_init(&c->lock);
3445 static int intel_pmu_cpu_prepare(int cpu)
3447 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3449 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
3450 cpuc->shared_regs = allocate_shared_regs(cpu);
3451 if (!cpuc->shared_regs)
3455 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3456 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
3458 cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
3459 if (!cpuc->constraint_list)
3460 goto err_shared_regs;
3462 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
3463 if (!cpuc->excl_cntrs)
3464 goto err_constraint_list;
3466 cpuc->excl_thread_id = 0;
3471 err_constraint_list:
3472 kfree(cpuc->constraint_list);
3473 cpuc->constraint_list = NULL;
3476 kfree(cpuc->shared_regs);
3477 cpuc->shared_regs = NULL;
3483 static void flip_smm_bit(void *data)
3485 unsigned long set = *(unsigned long *)data;
3488 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
3489 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3491 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
3492 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3496 static void intel_pmu_cpu_starting(int cpu)
3498 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3499 int core_id = topology_core_id(cpu);
3502 init_debug_store_on_cpu(cpu);
3504 * Deal with CPUs that don't clear their LBRs on power-up.
3506 intel_pmu_lbr_reset();
3508 cpuc->lbr_sel = NULL;
3510 if (x86_pmu.version > 1)
3511 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
3513 if (x86_pmu.counter_freezing)
3514 enable_counter_freeze();
3516 if (!cpuc->shared_regs)
3519 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
3520 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3521 struct intel_shared_regs *pc;
3523 pc = per_cpu(cpu_hw_events, i).shared_regs;
3524 if (pc && pc->core_id == core_id) {
3525 cpuc->kfree_on_online[0] = cpuc->shared_regs;
3526 cpuc->shared_regs = pc;
3530 cpuc->shared_regs->core_id = core_id;
3531 cpuc->shared_regs->refcnt++;
3534 if (x86_pmu.lbr_sel_map)
3535 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
3537 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3538 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3539 struct cpu_hw_events *sibling;
3540 struct intel_excl_cntrs *c;
3542 sibling = &per_cpu(cpu_hw_events, i);
3543 c = sibling->excl_cntrs;
3544 if (c && c->core_id == core_id) {
3545 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
3546 cpuc->excl_cntrs = c;
3547 if (!sibling->excl_thread_id)
3548 cpuc->excl_thread_id = 1;
3552 cpuc->excl_cntrs->core_id = core_id;
3553 cpuc->excl_cntrs->refcnt++;
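/*
 * Illustrative walk-through: on an HT pair, the second sibling to come
 * online finds the first sibling's structures via the core_id match,
 * queues its own copies for freeing through kfree_on_online[], and takes
 * excl_thread_id = 1, so both threads end up sharing one
 * intel_shared_regs/intel_excl_cntrs instance per core.
 */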
3557 static void free_excl_cntrs(int cpu)
3559 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3560 struct intel_excl_cntrs *c;
3562 c = cpuc->excl_cntrs;
3564 if (c->core_id == -1 || --c->refcnt == 0)
3566 cpuc->excl_cntrs = NULL;
3567 kfree(cpuc->constraint_list);
3568 cpuc->constraint_list = NULL;
3572 static void intel_pmu_cpu_dying(int cpu)
3574 fini_debug_store_on_cpu(cpu);
3576 if (x86_pmu.counter_freezing)
3577 disable_counter_freeze();
3580 static void intel_pmu_cpu_dead(int cpu)
3582 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3583 struct intel_shared_regs *pc;
3585 pc = cpuc->shared_regs;
3587 if (pc->core_id == -1 || --pc->refcnt == 0)
3589 cpuc->shared_regs = NULL;
3592 free_excl_cntrs(cpu);
3595 static void intel_pmu_sched_task(struct perf_event_context *ctx,
3598 intel_pmu_pebs_sched_task(ctx, sched_in);
3599 intel_pmu_lbr_sched_task(ctx, sched_in);
3602 static int intel_pmu_check_period(struct perf_event *event, u64 value)
3604 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
3607 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
3609 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
3611 PMU_FORMAT_ATTR(frontend, "config1:0-23");
3613 static struct attribute *intel_arch3_formats_attr[] = {
3614 &format_attr_event.attr,
3615 &format_attr_umask.attr,
3616 &format_attr_edge.attr,
3617 &format_attr_pc.attr,
3618 &format_attr_any.attr,
3619 &format_attr_inv.attr,
3620 &format_attr_cmask.attr,
3624 static struct attribute *hsw_format_attr[] = {
3625 &format_attr_in_tx.attr,
3626 &format_attr_in_tx_cp.attr,
3627 &format_attr_offcore_rsp.attr,
3628 &format_attr_ldlat.attr,
3632 static struct attribute *nhm_format_attr[] = {
3633 &format_attr_offcore_rsp.attr,
3634 &format_attr_ldlat.attr,
3638 static struct attribute *slm_format_attr[] = {
3639 &format_attr_offcore_rsp.attr,
3643 static struct attribute *skl_format_attr[] = {
3644 &format_attr_frontend.attr,
3648 static __initconst const struct x86_pmu core_pmu = {
3650 .handle_irq = x86_pmu_handle_irq,
3651 .disable_all = x86_pmu_disable_all,
3652 .enable_all = core_pmu_enable_all,
3653 .enable = core_pmu_enable_event,
3654 .disable = x86_pmu_disable_event,
3655 .hw_config = core_pmu_hw_config,
3656 .schedule_events = x86_schedule_events,
3657 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
3658 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
3659 .event_map = intel_pmu_event_map,
3660 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
3662 .large_pebs_flags = LARGE_PEBS_FLAGS,
3665 * Intel PMCs cannot be accessed sanely above 32-bit width,
3666 * so we install an artificial 1<<31 period regardless of
3667 * the generic event period:
3669 .max_period = (1ULL<<31) - 1,
3670 .get_event_constraints = intel_get_event_constraints,
3671 .put_event_constraints = intel_put_event_constraints,
3672 .event_constraints = intel_core_event_constraints,
3673 .guest_get_msrs = core_guest_get_msrs,
3674 .format_attrs = intel_arch_formats_attr,
3675 .events_sysfs_show = intel_event_sysfs_show,
3678 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
3679 * together with PMU version 1 and thus be using core_pmu with
3680 * shared_regs. We need the following callbacks here to allocate
3683 .cpu_prepare = intel_pmu_cpu_prepare,
3684 .cpu_starting = intel_pmu_cpu_starting,
3685 .cpu_dying = intel_pmu_cpu_dying,
3686 .cpu_dead = intel_pmu_cpu_dead,
3688 .check_period = intel_pmu_check_period,
3691 static struct attribute *intel_pmu_attrs[];
3693 static __initconst const struct x86_pmu intel_pmu = {
3695 .handle_irq = intel_pmu_handle_irq,
3696 .disable_all = intel_pmu_disable_all,
3697 .enable_all = intel_pmu_enable_all,
3698 .enable = intel_pmu_enable_event,
3699 .disable = intel_pmu_disable_event,
3700 .add = intel_pmu_add_event,
3701 .del = intel_pmu_del_event,
3702 .read = intel_pmu_read_event,
3703 .hw_config = intel_pmu_hw_config,
3704 .schedule_events = x86_schedule_events,
3705 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
3706 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
3707 .event_map = intel_pmu_event_map,
3708 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
3710 .large_pebs_flags = LARGE_PEBS_FLAGS,
3712 * Intel PMCs cannot be accessed sanely above 32 bit width,
3713 * so we install an artificial 1<<31 period regardless of
3714 * the generic event period:
3716 .max_period = (1ULL << 31) - 1,
3717 .get_event_constraints = intel_get_event_constraints,
3718 .put_event_constraints = intel_put_event_constraints,
3719 .pebs_aliases = intel_pebs_aliases_core2,
3721 .format_attrs = intel_arch3_formats_attr,
3722 .events_sysfs_show = intel_event_sysfs_show,
3724 .attrs = intel_pmu_attrs,
3726 .cpu_prepare = intel_pmu_cpu_prepare,
3727 .cpu_starting = intel_pmu_cpu_starting,
3728 .cpu_dying = intel_pmu_cpu_dying,
3729 .cpu_dead = intel_pmu_cpu_dead,
3731 .guest_get_msrs = intel_guest_get_msrs,
3732 .sched_task = intel_pmu_sched_task,
3734 .check_period = intel_pmu_check_period,
3737 static __init void intel_clovertown_quirk(void)
3740 * PEBS is unreliable due to:
3742 * AJ67 - PEBS may experience CPL leaks
3743 * AJ68 - PEBS PMI may be delayed by one event
3744 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
3745 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
3747 * AJ67 could be worked around by restricting the OS/USR flags.
3748 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
3750 * AJ106 could possibly be worked around by not allowing LBR
3751 * usage from PEBS, including the fixup.
3752 * AJ68 could possibly be worked around by always programming
3753 * a pebs_event_reset[0] value and coping with the lost events.
3755 * But taken together it might just make sense to not enable PEBS on these chips.
3758 pr_warn("PEBS disabled due to CPU errata\n");
3760 x86_pmu.pebs_constraints = NULL;
3763 static const struct x86_cpu_desc isolation_ucodes[] = {
3764 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_CORE, 3, 0x0000001f),
3765 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_ULT, 1, 0x0000001e),
3766 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_GT3E, 1, 0x00000015),
3767 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037),
3768 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
3769 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_CORE, 4, 0x00000023),
3770 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_GT3E, 1, 0x00000014),
3771 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 2, 0x00000010),
3772 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 3, 0x07000009),
3773 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 4, 0x0f000009),
3774 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 5, 0x0e000002),
3775 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
3776 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
3777 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
3778 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_MOBILE, 3, 0x0000007c),
3779 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_DESKTOP, 3, 0x0000007c),
3780 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 9, 0x0000004e),
3781 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 9, 0x0000004e),
3782 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 10, 0x0000004e),
3783 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 11, 0x0000004e),
3784 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 12, 0x0000004e),
3785 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 10, 0x0000004e),
3786 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 11, 0x0000004e),
3787 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 12, 0x0000004e),
3788 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 13, 0x0000004e),
3792 static void intel_check_pebs_isolation(void)
3794 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
3797 static __init void intel_pebs_isolation_quirk(void)
3799 WARN_ON_ONCE(x86_pmu.check_microcode);
3800 x86_pmu.check_microcode = intel_check_pebs_isolation;
3801 intel_check_pebs_isolation();
3804 static const struct x86_cpu_desc pebs_ucodes[] = {
3805 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028),
3806 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618),
3807 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c),
3811 static bool intel_snb_pebs_broken(void)
3813 return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
3816 static void intel_snb_check_microcode(void)
3818 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
3822 * Serialized by the microcode lock.
3824 if (x86_pmu.pebs_broken) {
3825 pr_info("PEBS enabled due to microcode update\n");
3826 x86_pmu.pebs_broken = 0;
3828 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
3829 x86_pmu.pebs_broken = 1;
3833 static bool is_lbr_from(unsigned long msr)
3835 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
3837 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
3841 * Under certain circumstances, accessing certain MSRs may cause a #GP.
3842 * This function tests whether the input MSR can be safely accessed.
3844 static bool check_msr(unsigned long msr, u64 mask)
3846 u64 val_old, val_new, val_tmp;
3849 * Read the current value, change it and read it back to see if it
3850 * matches, this is needed to detect certain hardware emulators
3851 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
3853 if (rdmsrl_safe(msr, &val_old))
3857 * Only change the bits which can be updated by wrmsrl.
3859 val_tmp = val_old ^ mask;
3861 if (is_lbr_from(msr))
3862 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
3864 if (wrmsrl_safe(msr, val_tmp) ||
3865 rdmsrl_safe(msr, &val_new))
3869 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
3870 * should equal rdmsrl()'s even with the quirk.
3872 if (val_new != val_tmp)
3875 if (is_lbr_from(msr))
3876 val_old = lbr_from_signext_quirk_wr(val_old);
3878 /* At this point it is certain that the MSR can be safely accessed.
3879 * Restore the old value and return.
3881 wrmsrl(msr, val_old);
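/*
 * Usage sketch (illustrative): init code can probe emulated environments
 * with something like
 *
 *	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
 *		x86_pmu.lbr_nr = 0;
 *
 * disabling LBR support when the MSR write is silently dropped.
 */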
3886 static __init void intel_sandybridge_quirk(void)
3888 x86_pmu.check_microcode = intel_snb_check_microcode;
3890 intel_snb_check_microcode();
3894 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
3895 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
3896 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
3897 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
3898 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
3899 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
3900 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
3901 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
3904 static __init void intel_arch_events_quirk(void)
3908 /* disable events that cpuid reported as not present */
3909 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
3910 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
3911 pr_warn("CPUID marked event: \'%s\' unavailable\n",
3912 intel_arch_events_map[bit].name);
3916 static __init void intel_nehalem_quirk(void)
3918 union cpuid10_ebx ebx;
3920 ebx.full = x86_pmu.events_maskl;
3921 if (ebx.split.no_branch_misses_retired) {
3923 * Erratum AAJ80 detected, we work it around by using
3924 * the BR_MISP_EXEC.ANY event. This will over-count
3925 * branch-misses, but it's still much better than the
3926 * architectural event which is often completely bogus:
3928 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
3929 ebx.split.no_branch_misses_retired = 0;
3930 x86_pmu.events_maskl = ebx.full;
3931 pr_info("CPU erratum AAJ80 worked around\n");
3935 static const struct x86_cpu_desc counter_freezing_ucodes[] = {
3936 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 2, 0x0000000e),
3937 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 9, 0x0000002e),
3938 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 10, 0x00000008),
3939 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_X, 1, 0x00000028),
3940 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 1, 0x00000028),
3941 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 8, 0x00000006),
3945 static bool intel_counter_freezing_broken(void)
3947 return !x86_cpu_has_min_microcode_rev(counter_freezing_ucodes);
3950 static __init void intel_counter_freezing_quirk(void)
3952 /* Check if it's already disabled */
3953 if (disable_counter_freezing)
3957 * If the system starts with the wrong ucode, leave the
3958 * counter-freezing feature permanently disabled.
3960 if (intel_counter_freezing_broken()) {
3961 pr_info("PMU counter freezing disabled due to CPU errata, "
3962 "please upgrade microcode\n");
3963 x86_pmu.counter_freezing = false;
3964 x86_pmu.handle_irq = intel_pmu_handle_irq;
3969 * enable software workaround for errata:
3974 * Only needed when HT is enabled. However, detecting
3975 * whether HT is enabled is difficult (model specific). So instead,
3976 * we enable the workaround early at boot, and verify whether
3977 * it is needed in a later initcall phase once we have valid
3978 * topology information to check if HT is actually enabled.
3980 static __init void intel_ht_bug(void)
3982 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
3984 x86_pmu.start_scheduling = intel_start_scheduling;
3985 x86_pmu.commit_scheduling = intel_commit_scheduling;
3986 x86_pmu.stop_scheduling = intel_stop_scheduling;
3989 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
3990 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
3992 /* Haswell special events */
3993 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
3994 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
3995 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
3996 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
3997 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
3998 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
3999 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
4000 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
4001 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
4002 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
4003 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
4004 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
4006 static struct attribute *hsw_events_attrs[] = {
4007 EVENT_PTR(td_slots_issued),
4008 EVENT_PTR(td_slots_retired),
4009 EVENT_PTR(td_fetch_bubbles),
4010 EVENT_PTR(td_total_slots),
4011 EVENT_PTR(td_total_slots_scale),
4012 EVENT_PTR(td_recovery_bubbles),
4013 EVENT_PTR(td_recovery_bubbles_scale),
4017 static struct attribute *hsw_mem_events_attrs[] = {
4018 EVENT_PTR(mem_ld_hsw),
4019 EVENT_PTR(mem_st_hsw),
4023 static struct attribute *hsw_tsx_events_attrs[] = {
4024 EVENT_PTR(tx_start),
4025 EVENT_PTR(tx_commit),
4026 EVENT_PTR(tx_abort),
4027 EVENT_PTR(tx_capacity),
4028 EVENT_PTR(tx_conflict),
4029 EVENT_PTR(el_start),
4030 EVENT_PTR(el_commit),
4031 EVENT_PTR(el_abort),
4032 EVENT_PTR(el_capacity),
4033 EVENT_PTR(el_conflict),
4034 EVENT_PTR(cycles_t),
4035 EVENT_PTR(cycles_ct),
4039 static ssize_t freeze_on_smi_show(struct device *cdev,
4040 struct device_attribute *attr,
4043 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
4046 static DEFINE_MUTEX(freeze_on_smi_mutex);
4048 static ssize_t freeze_on_smi_store(struct device *cdev,
4049 struct device_attribute *attr,
4050 const char *buf, size_t count)
4055 ret = kstrtoul(buf, 0, &val);
4062 mutex_lock(&freeze_on_smi_mutex);
4064 if (x86_pmu.attr_freeze_on_smi == val)
4067 x86_pmu.attr_freeze_on_smi = val;
4070 on_each_cpu(flip_smm_bit, &val, 1);
4073 mutex_unlock(&freeze_on_smi_mutex);
4078 static DEVICE_ATTR_RW(freeze_on_smi);
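/*
 * Usage sketch (illustrative):
 * "echo 1 > /sys/bus/event_source/devices/cpu/freeze_on_smi" runs
 * flip_smm_bit() on every CPU, setting DEBUGCTLMSR_FREEZE_IN_SMM so the
 * core counters stop counting while the CPU is in SMM.
 */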
4080 static ssize_t branches_show(struct device *cdev,
4081 struct device_attribute *attr,
4084 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
4087 static DEVICE_ATTR_RO(branches);
4089 static struct attribute *lbr_attrs[] = {
4090 &dev_attr_branches.attr,
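/*
 * Usage sketch (illustrative): this reads back as
 * /sys/bus/event_source/devices/cpu/caps/branches and reports the LBR
 * stack depth (x86_pmu.lbr_nr), e.g. 32 on Skylake.
 */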
4094 static char pmu_name_str[30];
4096 static ssize_t pmu_name_show(struct device *cdev,
4097 struct device_attribute *attr,
4100 return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
4103 static DEVICE_ATTR_RO(pmu_name);
4105 static struct attribute *intel_pmu_caps_attrs[] = {
4106 &dev_attr_pmu_name.attr,
4110 static struct attribute *intel_pmu_attrs[] = {
4111 &dev_attr_freeze_on_smi.attr,
4115 static __init struct attribute **
4116 get_events_attrs(struct attribute **base,
4117 struct attribute **mem,
4118 struct attribute **tsx)
4120 struct attribute **attrs = base;
4121 struct attribute **old;
4123 if (mem && x86_pmu.pebs)
4124 attrs = merge_attr(attrs, mem);
4126 if (tsx && boot_cpu_has(X86_FEATURE_RTM)) {
4128 attrs = merge_attr(attrs, tsx);
4136 __init int intel_pmu_init(void)
4138 struct attribute **extra_attr = NULL;
4139 struct attribute **mem_attr = NULL;
4140 struct attribute **tsx_attr = NULL;
4141 struct attribute **to_free = NULL;
4142 union cpuid10_edx edx;
4143 union cpuid10_eax eax;
4144 union cpuid10_ebx ebx;
4145 struct event_constraint *c;
4146 unsigned int unused;
4147 struct extra_reg *er;
4151 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
4152 switch (boot_cpu_data.x86) {
4154 return p6_pmu_init();
4156 return knc_pmu_init();
4158 return p4_pmu_init();
4164 * Check whether the Architectural PerfMon supports
4165 * Branch Misses Retired hw_event or not.
4167 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
4168 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
4171 version = eax.split.version_id;
4175 x86_pmu = intel_pmu;
4177 x86_pmu.version = version;
4178 x86_pmu.num_counters = eax.split.num_counters;
4179 x86_pmu.cntval_bits = eax.split.bit_width;
4180 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
4182 x86_pmu.events_maskl = ebx.full;
4183 x86_pmu.events_mask_len = eax.split.mask_length;
4185 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
4188 * Quirk: v2 perfmon does not report fixed-purpose events, so
4189 * assume at least 3 events, when not running in a hypervisor:
4192 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
4194 x86_pmu.num_counters_fixed =
4195 max((int)edx.split.num_counters_fixed, assume);
4199 x86_pmu.counter_freezing = !disable_counter_freezing;
4201 if (boot_cpu_has(X86_FEATURE_PDCM)) {
4204 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
4205 x86_pmu.intel_cap.capabilities = capabilities;
4210 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
4213 * Install the hw-cache-events table:
4215 switch (boot_cpu_data.x86_model) {
4216 case INTEL_FAM6_CORE_YONAH:
4217 pr_cont("Core events, ");
4221 case INTEL_FAM6_CORE2_MEROM:
4222 x86_add_quirk(intel_clovertown_quirk);
4223 case INTEL_FAM6_CORE2_MEROM_L:
4224 case INTEL_FAM6_CORE2_PENRYN:
4225 case INTEL_FAM6_CORE2_DUNNINGTON:
4226 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
4227 sizeof(hw_cache_event_ids));
4229 intel_pmu_lbr_init_core();
4231 x86_pmu.event_constraints = intel_core2_event_constraints;
4232 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
4233 pr_cont("Core2 events, ");
4237 case INTEL_FAM6_NEHALEM:
4238 case INTEL_FAM6_NEHALEM_EP:
4239 case INTEL_FAM6_NEHALEM_EX:
4240 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
4241 sizeof(hw_cache_event_ids));
4242 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4243 sizeof(hw_cache_extra_regs));
4245 intel_pmu_lbr_init_nhm();
4247 x86_pmu.event_constraints = intel_nehalem_event_constraints;
4248 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
4249 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4250 x86_pmu.extra_regs = intel_nehalem_extra_regs;
4252 mem_attr = nhm_mem_events_attrs;
4254 /* UOPS_ISSUED.STALLED_CYCLES */
4255 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4256 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4257 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4258 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4259 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
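/*
 * Illustrative: with the two mappings above, "perf stat -e
 * stalled-cycles-frontend" programs raw config 0x0180010e (event 0x0e,
 * umask 0x01, INV, CMASK=1), i.e. cycles in which no uops were issued.
 */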
4261 intel_pmu_pebs_data_source_nhm();
4262 x86_add_quirk(intel_nehalem_quirk);
4263 x86_pmu.pebs_no_tlb = 1;
4264 extra_attr = nhm_format_attr;
4266 pr_cont("Nehalem events, ");
4270 case INTEL_FAM6_ATOM_BONNELL:
4271 case INTEL_FAM6_ATOM_BONNELL_MID:
4272 case INTEL_FAM6_ATOM_SALTWELL:
4273 case INTEL_FAM6_ATOM_SALTWELL_MID:
4274 case INTEL_FAM6_ATOM_SALTWELL_TABLET:
4275 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
4276 sizeof(hw_cache_event_ids));
4278 intel_pmu_lbr_init_atom();
4280 x86_pmu.event_constraints = intel_gen_event_constraints;
4281 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
4282 x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
4283 pr_cont("Atom events, ");
4287 case INTEL_FAM6_ATOM_SILVERMONT:
4288 case INTEL_FAM6_ATOM_SILVERMONT_X:
4289 case INTEL_FAM6_ATOM_SILVERMONT_MID:
4290 case INTEL_FAM6_ATOM_AIRMONT:
4291 case INTEL_FAM6_ATOM_AIRMONT_MID:
4292 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
4293 sizeof(hw_cache_event_ids));
4294 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
4295 sizeof(hw_cache_extra_regs));
4297 intel_pmu_lbr_init_slm();
4299 x86_pmu.event_constraints = intel_slm_event_constraints;
4300 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
4301 x86_pmu.extra_regs = intel_slm_extra_regs;
4302 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4303 x86_pmu.cpu_events = slm_events_attrs;
4304 extra_attr = slm_format_attr;
4305 pr_cont("Silvermont events, ");
4306 name = "silvermont";
	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_GOLDMONT_X:
		x86_add_quirk(intel_counter_freezing_quirk);
		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_glm_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 * :pp is identical to :ppp
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.cpu_events = glm_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Goldmont events, ");
		name = "goldmont";
		break;
	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
		x86_add_quirk(intel_counter_freezing_quirk);
		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.extra_regs = intel_glm_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_PEBS_ALL;
		x86_pmu.get_event_constraints = glp_get_event_constraints;
		x86_pmu.cpu_events = glm_events_attrs;
		/* Goldmont Plus has 4-wide pipeline */
		event_attr_td_total_slots_scale_glm.event_str = "4";
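		/*
		 * topdown-total-slots is the pipeline width times unhalted
		 * core cycles; the scale string set above bumps the width
		 * from Goldmont's 3 to 4 for Goldmont Plus.
		 */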
		extra_attr = slm_format_attr;
		pr_cont("Goldmont plus events, ");
		name = "goldmont_plus";
		break;
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_WESTMERE_EX:
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;

		mem_attr = nhm_mem_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		intel_pmu_pebs_data_source_nhm();
		extra_attr = nhm_format_attr;
		pr_cont("Westmere events, ");
		name = "westmere";
		break;
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_SANDYBRIDGE_X:
		x86_add_quirk(intel_sandybridge_quirk);
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;

		/* all extra regs are per-cpu when HT is on */
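		/*
		 * PMU_FL_HAS_RSP_1 records that a second, equivalent
		 * OFFCORE_RSP MSR exists; PMU_FL_NO_HT_SHARING that the
		 * extra MSRs are replicated per SMT sibling, so no
		 * cross-thread sharing is needed for them.
		 */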
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;
		mem_attr = snb_mem_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		extra_attr = nhm_format_attr;

		pr_cont("SandyBridge events, ");
		name = "sandybridge";
		break;
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE_X:
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		/* dTLB-load-misses on IVB is different than SNB */
		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */

		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;
		mem_attr = snb_mem_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		extra_attr = nhm_format_attr;

		pr_cont("IvyBridge events, ");
		name = "ivybridge";
		break;
	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:
		x86_add_quirk(intel_ht_bug);
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
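		/*
		 * late_ack: unmask the PMI (APIC LVTPC) only after the
		 * overflowed counters have been handled, which avoids
		 * spurious NMIs on these parts.
		 */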
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_hsw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		x86_pmu.lbr_double_abort = true;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		pr_cont("Haswell events, ");
		name = "haswell";
		break;
	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_XEON_D:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_BROADWELL_X:
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		/* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
		hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
									 BDW_L3_MISS|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
									  HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
									     BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
									      BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_bdw_event_constraints;
		x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		x86_pmu.limit_period = bdw_limit_period;
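		/*
		 * bdw_limit_period() enforces a minimum sample period with
		 * the low bits clear for INST_RETIRED.ALL, working around
		 * Broadwell errata on that event.
		 */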
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		pr_cont("Broadwell events, ");
		name = "broadwell";
		break;
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		memcpy(hw_cache_event_ids,
		       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs,
		       knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		intel_pmu_lbr_init_knl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_knl_extra_regs;

		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
		extra_attr = slm_format_attr;
		pr_cont("Knights Landing/Mill events, ");
		name = "knights-landing";
		break;
	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		intel_pmu_lbr_init_skl();

		/* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
		event_attr_td_recovery_bubbles.event_str_noht =
			"event=0xd,umask=0x1,cmask=1";
		event_attr_td_recovery_bubbles.event_str_ht =
			"event=0xd,umask=0x1,cmask=1,any=1";
		x86_pmu.event_constraints = intel_skl_event_constraints;
		x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
		x86_pmu.extra_regs = intel_skl_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
		x86_pmu.pebs_prec_dist = true;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_attr = merge_attr(extra_attr, skl_format_attr);
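		/*
		 * merge_attr() allocates a new array; remember it so it can
		 * be kfree()d once it is no longer needed.
		 */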
		to_free = extra_attr;
		x86_pmu.cpu_events = hsw_events_attrs;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		intel_pmu_pebs_data_source_skl(
			boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
		pr_cont("Skylake events, ");
		name = "skylake";
		break;
	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			name = "generic_arch_v1";
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v2+";
			break;
		}
	}
	snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);

	if (version >= 2 && extra_attr) {
		x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
						  extra_attr);
		WARN_ON(!x86_pmu.format_attrs);
	}

	x86_pmu.cpu_events = get_events_attrs(x86_pmu.cpu_events,
					      mem_attr, tsx_attr);
	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
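	/*
	 * intel_ctrl is the GLOBAL_CTRL enable mask: one bit per generic
	 * counter, with the fixed-counter bits OR'd in at INTEL_PMC_IDX_FIXED
	 * below.
	 */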
	x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
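	/*
	 * Clamp every constraint mask to the counters that actually exist
	 * and recompute its weight, which the event scheduler uses.
	 */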
	if (x86_pmu.event_constraints) {
		/*
		 * event on fixed counter2 (REF_CYCLES) only works on this
		 * counter, so do not extend mask to generic counters
		 */
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask == FIXED_EVENT_FLAGS
			    && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			}
			c->idxmsk64 &=
				~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
			c->weight = hweight64(c->idxmsk64);
		}
	}
	/*
	 * Accessing the LBR MSRs may cause a #GP under certain circumstances,
	 * e.g. KVM doesn't support the LBR MSRs. Check all LBR MSRs here and
	 * disable LBR access if any of them cannot be accessed.
	 */
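	/*
	 * check_msr() probes an MSR by toggling the given bits and reading
	 * the value back, restoring the original value on success; it fails
	 * cleanly where the MSR is not implemented, e.g. under a hypervisor.
	 */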
	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}
	x86_pmu.caps_attrs = intel_pmu_caps_attrs;

	if (x86_pmu.lbr_nr) {
		x86_pmu.caps_attrs = merge_attr(x86_pmu.caps_attrs, lbr_attrs);
		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
	}
	/*
	 * Accessing an extra MSR may cause a #GP under certain circumstances,
	 * e.g. KVM doesn't support offcore events. Check all extra_regs here.
	 */
	if (x86_pmu.extra_regs) {
		for (er = x86_pmu.extra_regs; er->msr; er++) {
			er->extra_msr_access = check_msr(er->msr, 0x11UL);
			/* Disable LBR select mapping */
			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
				x86_pmu.lbr_sel_map = NULL;
		}
	}
	/* Support full width counters using alternative MSR range */
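	/*
	 * The legacy counter MSRs only take 32-bit (sign-extended) writes;
	 * when full-width writes are supported, the MSR_IA32_PMC alias takes
	 * the full counter width, allowing a larger max_period.
	 */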
	if (x86_pmu.intel_cap.full_width_write) {
		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");
	}
	/*
	 * For arch perfmon v4 use counter freezing to avoid
	 * several MSR accesses in the PMI.
	 */
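	/*
	 * With counter freezing the hardware freezes the counters when the
	 * PMI is raised, so the v4 handler can skip the usual disable and
	 * re-enable MSR writes.
	 */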
	if (x86_pmu.counter_freezing)
		x86_pmu.handle_irq = intel_pmu_handle_irq_v4;

	return 0;
}
/*
 * HT bug: phase 2 init
 * Called once we have valid topology information to check
 * whether or not HT is enabled.
 * If HT is off, then we disable the workaround.
 */
static __init int fixup_ht_bug(void)
{
	int c;
	/*
	 * The problem is not present on this CPU model, nothing to do.
	 */
	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
		return 0;
	if (topology_max_smt_threads() > 1) {
		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
		return 0;
	}

	cpus_read_lock();

	hardlockup_detector_perf_stop();

	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);

	x86_pmu.start_scheduling = NULL;
	x86_pmu.commit_scheduling = NULL;
	x86_pmu.stop_scheduling = NULL;

	hardlockup_detector_perf_restart();

	for_each_online_cpu(c)
		free_excl_cntrs(&per_cpu(cpu_hw_events, c));

	cpus_read_unlock();
	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
	return 0;
}
subsys_initcall(fixup_ht_bug);