Merge remote-tracking branch 'origin/master' into perf/core
[sfrench/cifs-2.6.git] / arch / x86 / events / intel / uncore_snb.c
1 // SPDX-License-Identifier: GPL-2.0
/* Nehalem/Sandy Bridge/Haswell/Broadwell/Skylake uncore support */
3 #include "uncore.h"
4
5 /* Uncore IMC PCI IDs */
6 #define PCI_DEVICE_ID_INTEL_SNB_IMC             0x0100
7 #define PCI_DEVICE_ID_INTEL_IVB_IMC             0x0154
8 #define PCI_DEVICE_ID_INTEL_IVB_E3_IMC          0x0150
9 #define PCI_DEVICE_ID_INTEL_HSW_IMC             0x0c00
10 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC           0x0a04
11 #define PCI_DEVICE_ID_INTEL_BDW_IMC             0x1604
12 #define PCI_DEVICE_ID_INTEL_SKL_U_IMC           0x1904
13 #define PCI_DEVICE_ID_INTEL_SKL_Y_IMC           0x190c
14 #define PCI_DEVICE_ID_INTEL_SKL_HD_IMC          0x1900
15 #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC          0x1910
16 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC          0x190f
17 #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC          0x191f
18 #define PCI_DEVICE_ID_INTEL_SKL_E3_IMC          0x1918
19 #define PCI_DEVICE_ID_INTEL_KBL_Y_IMC           0x590c
20 #define PCI_DEVICE_ID_INTEL_KBL_U_IMC           0x5904
21 #define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC          0x5914
22 #define PCI_DEVICE_ID_INTEL_KBL_SD_IMC          0x590f
23 #define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC          0x591f
24 #define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC          0x5910
25 #define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC          0x5918
26 #define PCI_DEVICE_ID_INTEL_CFL_2U_IMC          0x3ecc
27 #define PCI_DEVICE_ID_INTEL_CFL_4U_IMC          0x3ed0
28 #define PCI_DEVICE_ID_INTEL_CFL_4H_IMC          0x3e10
29 #define PCI_DEVICE_ID_INTEL_CFL_6H_IMC          0x3ec4
30 #define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC        0x3e0f
31 #define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC        0x3e1f
32 #define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC        0x3ec2
33 #define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC        0x3e30
34 #define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC        0x3e18
35 #define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC        0x3ec6
36 #define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC        0x3e31
37 #define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC        0x3e33
38 #define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC        0x3eca
39 #define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC        0x3e32
40 #define PCI_DEVICE_ID_INTEL_AML_YD_IMC          0x590c
41 #define PCI_DEVICE_ID_INTEL_AML_YQ_IMC          0x590d
42 #define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC          0x3ed0
43 #define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC        0x3e34
44 #define PCI_DEVICE_ID_INTEL_WHL_UD_IMC          0x3e35
45 #define PCI_DEVICE_ID_INTEL_CML_H1_IMC          0x9b44
46 #define PCI_DEVICE_ID_INTEL_CML_H2_IMC          0x9b54
47 #define PCI_DEVICE_ID_INTEL_CML_H3_IMC          0x9b64
48 #define PCI_DEVICE_ID_INTEL_CML_U1_IMC          0x9b51
49 #define PCI_DEVICE_ID_INTEL_CML_U2_IMC          0x9b61
50 #define PCI_DEVICE_ID_INTEL_CML_U3_IMC          0x9b71
51 #define PCI_DEVICE_ID_INTEL_CML_S1_IMC          0x9b33
52 #define PCI_DEVICE_ID_INTEL_CML_S2_IMC          0x9b43
53 #define PCI_DEVICE_ID_INTEL_CML_S3_IMC          0x9b53
54 #define PCI_DEVICE_ID_INTEL_CML_S4_IMC          0x9b63
55 #define PCI_DEVICE_ID_INTEL_CML_S5_IMC          0x9b73
56 #define PCI_DEVICE_ID_INTEL_ICL_U_IMC           0x8a02
57 #define PCI_DEVICE_ID_INTEL_ICL_U2_IMC          0x8a12
58 #define PCI_DEVICE_ID_INTEL_TGL_U1_IMC          0x9a02
59 #define PCI_DEVICE_ID_INTEL_TGL_U2_IMC          0x9a04
60 #define PCI_DEVICE_ID_INTEL_TGL_U3_IMC          0x9a12
61 #define PCI_DEVICE_ID_INTEL_TGL_U4_IMC          0x9a14
62 #define PCI_DEVICE_ID_INTEL_TGL_H_IMC           0x9a36
63 #define PCI_DEVICE_ID_INTEL_RKL_1_IMC           0x4c43
64 #define PCI_DEVICE_ID_INTEL_RKL_2_IMC           0x4c53
65
66 /* SNB event control */
67 #define SNB_UNC_CTL_EV_SEL_MASK                 0x000000ff
68 #define SNB_UNC_CTL_UMASK_MASK                  0x0000ff00
69 #define SNB_UNC_CTL_EDGE_DET                    (1 << 18)
70 #define SNB_UNC_CTL_EN                          (1 << 22)
71 #define SNB_UNC_CTL_INVERT                      (1 << 23)
72 #define SNB_UNC_CTL_CMASK_MASK                  0x1f000000
73 #define NHM_UNC_CTL_CMASK_MASK                  0xff000000
74 #define NHM_UNC_FIXED_CTR_CTL_EN                (1 << 0)
75
76 #define SNB_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
77                                                  SNB_UNC_CTL_UMASK_MASK | \
78                                                  SNB_UNC_CTL_EDGE_DET | \
79                                                  SNB_UNC_CTL_INVERT | \
80                                                  SNB_UNC_CTL_CMASK_MASK)
81
82 #define NHM_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
83                                                  SNB_UNC_CTL_UMASK_MASK | \
84                                                  SNB_UNC_CTL_EDGE_DET | \
85                                                  SNB_UNC_CTL_INVERT | \
86                                                  NHM_UNC_CTL_CMASK_MASK)
87
88 /* SNB global control register */
89 #define SNB_UNC_PERF_GLOBAL_CTL                 0x391
90 #define SNB_UNC_FIXED_CTR_CTRL                  0x394
91 #define SNB_UNC_FIXED_CTR                       0x395
92
93 /* SNB uncore global control */
94 #define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
95 #define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)
96
97 /* SNB Cbo register */
98 #define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
99 #define SNB_UNC_CBO_0_PER_CTR0                  0x706
100 #define SNB_UNC_CBO_MSR_OFFSET                  0x10
101
102 /* SNB ARB register */
103 #define SNB_UNC_ARB_PER_CTR0                    0x3b0
104 #define SNB_UNC_ARB_PERFEVTSEL0                 0x3b2
105 #define SNB_UNC_ARB_MSR_OFFSET                  0x10
106
107 /* NHM global control register */
108 #define NHM_UNC_PERF_GLOBAL_CTL                 0x391
109 #define NHM_UNC_FIXED_CTR                       0x394
110 #define NHM_UNC_FIXED_CTR_CTRL                  0x395
111
112 /* NHM uncore global control */
113 #define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
114 #define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)
115
116 /* NHM uncore register */
117 #define NHM_UNC_PERFEVTSEL0                     0x3c0
118 #define NHM_UNC_UNCORE_PMC0                     0x3b0
119
120 /* SKL uncore global control */
121 #define SKL_UNC_PERF_GLOBAL_CTL                 0xe01
122 #define SKL_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 5) - 1)
123
124 /* ICL Cbo register */
125 #define ICL_UNC_CBO_CONFIG                      0x396
126 #define ICL_UNC_NUM_CBO_MASK                    0xf
127 #define ICL_UNC_CBO_0_PER_CTR0                  0x702
128 #define ICL_UNC_CBO_MSR_OFFSET                  0x8
129
130 /* ICL ARB register */
131 #define ICL_UNC_ARB_PER_CTR                     0x3b1
132 #define ICL_UNC_ARB_PERFEVTSEL                  0x3b3
133
134 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
135 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
136 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
137 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
138 DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
139 DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
140
141 /* Sandy Bridge uncore support */
142 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
143 {
144         struct hw_perf_event *hwc = &event->hw;
145
146         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
147                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
148         else
149                 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
150 }
151
/* Stop a counter by clearing its event-select/control MSR. */
static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

/*
 * Turn on the uncore global enable for all cores.  The global control
 * MSR is shared by all uncore PMUs, so only the first PMU instance
 * (pmu_idx 0) programs it.
 */
static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

/* Box-level enable hook: re-assert the shared global enable. */
static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

/* Clear the shared global control when the first PMU instance is torn down. */
static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}
176
/* Named events exported through sysfs; clockticks is the fixed-counter event. */
static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

/* sysfs "format" attributes describing the raw config bit layout. */
static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,	/* SNB cmask is 5 bits: config:24-28 */
	NULL,
};

static const struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};

/* MSR-based callbacks shared by the SNB C-Box and ARB units. */
static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

/* ARB events 0x80 and 0x83 may only run on counter 0. */
static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};
210
/*
 * SNB C-Box: two general 44-bit counters per box plus one shared 48-bit
 * fixed counter; up to four boxes (num_boxes is clamped to the core
 * count in snb_uncore_cpu_init()).
 */
static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

/* SNB ARB (system agent arbiter): one box, two 44-bit counters. */
static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

/* MSR-based uncore units present on SandyBridge client parts. */
static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
248
/*
 * Register the SNB MSR uncore units.  There is one C-Box per core, so
 * clamp the default box count to the actual core count of this part.
 */
void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}
255
/*
 * SKL variant of the global-enable hook: same scheme as SNB but with the
 * SKL global control MSR and a 5-core enable mask.
 */
static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}

	/* The 8th CBOX has different MSR space */
	if (box->pmu->pmu_idx == 7)
		__set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
}

/* Box-level enable hook: re-assert the SKL global enable. */
static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

/* Clear the SKL global control when the first PMU instance is torn down. */
static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

/* SKL callbacks; event enable/disable/read are shared with SNB. */
static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
288
/*
 * SKL C-Box: four counters per box and up to eight boxes; otherwise the
 * same MSR layout as SNB (num_boxes is clamped in skl_uncore_cpu_init()).
 */
static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

/* SKL reuses the SNB ARB unit alongside its own C-Box. */
static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
312
/*
 * Register the SKL MSR uncore units, clamp the C-Box count to the core
 * count, and switch the shared ARB unit to the SKL global-control ops.
 */
void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
320
/* ICL callbacks: no box-level hooks; only per-event enable/disable/read. */
static struct intel_uncore_ops icl_uncore_msr_ops = {
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

/*
 * ICL C-Box: num_boxes is intentionally unset here; it is discovered at
 * runtime from the CBO_CONFIG MSR in icl_uncore_cpu_init().
 */
static struct intel_uncore_type icl_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

/* Event exported by the clock unit below. */
static struct uncore_event_desc icl_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
	{ /* end: all zeroes */ },
};

/* The clock unit only exposes the event field. */
static struct attribute *icl_uncore_clock_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group icl_uncore_clock_format_group = {
	.name = "format",
	.attrs = icl_uncore_clock_formats_attr,
};

/* ICL clock unit: a single fixed 48-bit counter, no general counters. */
static struct intel_uncore_type icl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &icl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

/* ICL ARB: single box, single 44-bit counter at ICL-specific MSRs. */
static struct intel_uncore_type icl_uncore_arb = {
	.name		= "arb",
	.num_counters	= 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_ARB_PER_CTR,
	.event_ctl	= ICL_UNC_ARB_PERFEVTSEL,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

/* MSR-based uncore units present on IceLake client parts. */
static struct intel_uncore_type *icl_msr_uncores[] = {
	&icl_uncore_cbox,
	&icl_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};
386
387 static int icl_get_cbox_num(void)
388 {
389         u64 num_boxes;
390
391         rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);
392
393         return num_boxes & ICL_UNC_NUM_CBO_MASK;
394 }
395
/* Register the ICL uncore units; the C-Box count is read from an MSR. */
void icl_uncore_cpu_init(void)
{
	uncore_msr_uncores = icl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
}

/* TGL reuses the ICL C-Box and clock units but the SNB-style ARB unit. */
static struct intel_uncore_type *tgl_msr_uncores[] = {
	&icl_uncore_cbox,
	&snb_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};
408
/*
 * RKL/TGL init hook: only the global enable bit is set; no per-core
 * enable mask is written, unlike the SNB/SKL variants.
 */
static void rkl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

/*
 * Register the TGL uncore units.  All units share the SKL MSR ops, whose
 * init_box callback is overridden (globally, on the shared ops struct)
 * with the RKL/TGL variant above.
 */
void tgl_uncore_cpu_init(void)
{
	uncore_msr_uncores = tgl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
	icl_uncore_cbox.ops = &skl_uncore_msr_ops;
	icl_uncore_clockbox.ops = &skl_uncore_msr_ops;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
	skl_uncore_msr_ops.init_box = rkl_uncore_msr_init_box;
}
424
/* Index of the IMC unit in the PCI uncore unit table. */
enum {
	SNB_PCI_UNCORE_IMC,
};

/*
 * IMC free-running events.  Counters tick in 64-byte lines; the scale
 * 6.103515625e-5 (= 64 / 2^20) converts counts to MiB in userspace.
 */
static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
	INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),

	{ /* end: all zeroes */ },
};
452
453 #define SNB_UNCORE_PCI_IMC_EVENT_MASK           0xff
454 #define SNB_UNCORE_PCI_IMC_BAR_OFFSET           0x48
455
456 /* page size multiple covering all config regs */
457 #define SNB_UNCORE_PCI_IMC_MAP_SIZE             0x6000
458
459 #define SNB_UNCORE_PCI_IMC_DATA_READS           0x1
460 #define SNB_UNCORE_PCI_IMC_DATA_READS_BASE      0x5050
461 #define SNB_UNCORE_PCI_IMC_DATA_WRITES          0x2
462 #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE     0x5054
463 #define SNB_UNCORE_PCI_IMC_CTR_BASE             SNB_UNCORE_PCI_IMC_DATA_READS_BASE
464
465 /* BW break down- legacy counters */
466 #define SNB_UNCORE_PCI_IMC_GT_REQUESTS          0x3
467 #define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE     0x5040
468 #define SNB_UNCORE_PCI_IMC_IA_REQUESTS          0x4
469 #define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE     0x5044
470 #define SNB_UNCORE_PCI_IMC_IO_REQUESTS          0x5
471 #define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE     0x5048
472
/* Free-running counter types exposed by the client IMC. */
enum perf_snb_uncore_imc_freerunning_types {
	SNB_PCI_UNCORE_IMC_DATA_READS		= 0,
	SNB_PCI_UNCORE_IMC_DATA_WRITES,
	SNB_PCI_UNCORE_IMC_GT_REQUESTS,
	SNB_PCI_UNCORE_IMC_IA_REQUESTS,
	SNB_PCI_UNCORE_IMC_IO_REQUESTS,

	SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};

/*
 * Free-running counter descriptions: { base MMIO offset, box offset,
 * index offset, number of counters, counter width }.  One 32-bit
 * counter of each type.
 */
static struct freerunning_counters snb_uncore_imc_freerunning[] = {
	[SNB_PCI_UNCORE_IMC_DATA_READS]		= { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_DATA_WRITES]	= { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_GT_REQUESTS]	= { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IA_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IO_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
};

/* The IMC only exposes the event field in its raw config. */
static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};
505
/*
 * Map the IMC's MMIO counter region.  The base address is read from PCI
 * config space at SNB_UNCORE_PCI_IMC_BAR_OFFSET (the upper 32 bits from
 * the following dword when phys_addr_t is 64-bit), then rounded down to
 * a page boundary before being ioremap()ed.
 */
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	/* Upper half of the 64-bit BAR lives in the next config dword. */
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	/* Drop the low flag/offset bits to get a page-aligned base. */
	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);

	/* Counters are only 32 bits wide; poll regularly to catch wraps. */
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}
530
/*
 * The IMC counters are free-running and cannot be started or stopped,
 * so all box- and event-level enable/disable hooks are no-ops.
 */
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}
542
/*
 * Keep the custom event_init() function compatible with old event
 * encoding for free running counters.
 *
 * Validates the event, binds it to the package's designated CPU, maps
 * the legacy config value to a counter base address, and rewrites
 * hw.config into the standard free-running encoding.  Returns 0 on
 * success, -ENOENT if the event is not for this PMU, -EINVAL otherwise.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_GT_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IA_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IO_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.idx = idx;

	/* Convert to standard encoding format for freerunning counters */
	event->hw.config = ((cfg - 1) << 8) | 0x10ff;

	/* no group validation needed, we have free running counters */

	return 0;
}
634
/* No extra hardware configuration is needed for IMC events. */
static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}
639
/*
 * Build the PCI bus -> physical package map for client parts.  Client
 * CPUs have a single package, so the bus of the given IMC device is
 * simply mapped to package 0.
 *
 * Returns 0 on success, -ENOTTY if the device is not present, or
 * -ENOMEM if the per-segment map cannot be allocated.
 */
int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);	/* drop the reference taken above */
		return -ENOMEM;
	}
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}
667
/*
 * Dedicated struct pmu for the IMC: it needs the custom event_init()
 * above instead of the generic uncore one.
 */
static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= uncore_pmu_event_add,
	.del		= uncore_pmu_event_del,
	.start		= uncore_pmu_event_start,
	.stop		= uncore_pmu_event_stop,
	.read		= uncore_pmu_event_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

/* MMIO-based callbacks for the IMC free-running counters. */
static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= uncore_mmio_read_counter,
};

/* Client IMC unit: one box of five free-running MMIO counters. */
static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters	= 5,
	.num_boxes	= 1,
	.num_freerunning_types	= SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size	= SNB_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning	= snb_uncore_imc_freerunning,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};
702
/* PCI-based uncore units (indexed by the SNB_PCI_UNCORE_* enum). */
static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};

/* IMC PCI device IDs per client generation. */
static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
747
748 static const struct pci_device_id skl_uncore_pci_ids[] = {
749         { /* IMC */
750                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
751                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
752         },
753         { /* IMC */
754                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
755                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
756         },
757         { /* IMC */
758                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
759                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
760         },
761         { /* IMC */
762                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
763                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
764         },
765         { /* IMC */
766                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
767                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
768         },
769         { /* IMC */
770                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
771                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
772         },
773         { /* IMC */
774                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC),
775                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
776         },
777         { /* IMC */
778                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
779                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
780         },
781         { /* IMC */
782                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
783                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
784         },
785         { /* IMC */
786                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
787                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
788         },
789         { /* IMC */
790                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
791                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
792         },
793         { /* IMC */
794                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
795                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
796         },
797         { /* IMC */
798                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_HQ_IMC),
799                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
800         },
801         { /* IMC */
802                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_WQ_IMC),
803                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
804         },
805         { /* IMC */
806                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
807                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
808         },
809         { /* IMC */
810                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
811                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
812         },
813         { /* IMC */
814                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
815                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
816         },
817         { /* IMC */
818                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
819                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
820         },
821         { /* IMC */
822                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
823                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
824         },
825         { /* IMC */
826                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
827                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
828         },
829         { /* IMC */
830                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
831                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
832         },
833         { /* IMC */
834                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
835                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
836         },
837         { /* IMC */
838                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
839                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
840         },
841         { /* IMC */
842                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
843                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
844         },
845         { /* IMC */
846                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
847                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
848         },
849         { /* IMC */
850                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
851                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
852         },
853         { /* IMC */
854                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
855                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
856         },
857         { /* IMC */
858                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
859                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
860         },
861         { /* IMC */
862                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YD_IMC),
863                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
864         },
865         { /* IMC */
866                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YQ_IMC),
867                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
868         },
869         { /* IMC */
870                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UQ_IMC),
871                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
872         },
873         { /* IMC */
874                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC),
875                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
876         },
877         { /* IMC */
878                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UD_IMC),
879                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
880         },
881         { /* IMC */
882                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H1_IMC),
883                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
884         },
885         { /* IMC */
886                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H2_IMC),
887                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
888         },
889         { /* IMC */
890                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H3_IMC),
891                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
892         },
893         { /* IMC */
894                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U1_IMC),
895                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
896         },
897         { /* IMC */
898                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U2_IMC),
899                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
900         },
901         { /* IMC */
902                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U3_IMC),
903                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
904         },
905         { /* IMC */
906                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S1_IMC),
907                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
908         },
909         { /* IMC */
910                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S2_IMC),
911                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
912         },
913         { /* IMC */
914                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S3_IMC),
915                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
916         },
917         { /* IMC */
918                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S4_IMC),
919                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
920         },
921         { /* IMC */
922                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S5_IMC),
923                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
924         },
925         { /* end: all zeroes */ },
926 };
927
/*
 * Ice Lake / Rocket Lake client IMC PCI IDs. All map to the same
 * SNB_PCI_UNCORE_IMC uncore type (RKL reuses the ICL id_table).
 */
static const struct pci_device_id icl_uncore_pci_ids[] = {
	{ /* IMC (Ice Lake U) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC (Ice Lake U) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC (Rocket Lake) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RKL_1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC (Rocket Lake) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RKL_2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
947
/*
 * Per-generation pci_driver stubs. These carry only a name and an id_table;
 * no probe/remove callbacks are set here. One of them is selected by
 * imc_uncore_find_dev() and handed to the common uncore code via
 * uncore_pci_driver (see imc_uncore_pci_init() below).
 */
static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

static struct pci_driver icl_uncore_pci_driver = {
	.name		= "icl_uncore",
	.id_table	= icl_uncore_pci_ids,
};
977
/* Pairs one IMC PCI device ID with the pci_driver whose id_table covers it. */
struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
/* Build an entry: @a is the PCI_DEVICE_ID_INTEL_ suffix, @d the driver. */
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
984
/*
 * Probe order for client IMC devices: imc_uncore_find_dev() walks this
 * table (zero pci_id terminates it) and picks the driver of the first ID
 * actually present on the system.
 */
static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),    /* 2nd Gen Core processor */
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
	IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver),  /* Xeon E3 V5 Gen Core processor */
	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
	IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core H Quad Core */
	IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S 4 cores Work Station */
	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
	IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core Y Mobile Dual Core */
	IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core Y Mobile Quad Core */
	IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core U Mobile Dual Core */
	IMC_DEV(CML_H1_IMC, &skl_uncore_pci_driver),    /* 10th Gen Core H */
	IMC_DEV(CML_H2_IMC, &skl_uncore_pci_driver),    /* 10th Gen Core H */
	IMC_DEV(CML_H3_IMC, &skl_uncore_pci_driver),    /* 10th Gen Core H */
	IMC_DEV(CML_U1_IMC, &skl_uncore_pci_driver),    /* 10th Gen Core U */
	IMC_DEV(CML_U2_IMC, &skl_uncore_pci_driver),    /* 10th Gen Core U */
	IMC_DEV(CML_U3_IMC, &skl_uncore_pci_driver),    /* 10th Gen Core U */
	IMC_DEV(CML_S1_IMC, &skl_uncore_pci_driver),    /* 10th Gen Core S */
	IMC_DEV(CML_S2_IMC, &skl_uncore_pci_driver),    /* 10th Gen Core S */
	IMC_DEV(CML_S3_IMC, &skl_uncore_pci_driver),    /* 10th Gen Core S */
	IMC_DEV(CML_S4_IMC, &skl_uncore_pci_driver),    /* 10th Gen Core S */
	IMC_DEV(CML_S5_IMC, &skl_uncore_pci_driver),    /* 10th Gen Core S */
	IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),     /* 10th Gen Core Mobile */
	IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver),    /* 10th Gen Core Mobile */
	IMC_DEV(RKL_1_IMC, &icl_uncore_pci_driver),     /* 11th Gen Core */
	IMC_DEV(RKL_2_IMC, &icl_uncore_pci_driver),     /* 11th Gen Core */
	{  /* end marker */ }
};
1042
1043
/* Walk table @t with cursor @x until the zero-pci_id end marker. */
#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)
1046
1047 static struct pci_driver *imc_uncore_find_dev(void)
1048 {
1049         const struct imc_uncore_pci_dev *p;
1050         int ret;
1051
1052         for_each_imc_pci_id(p, desktop_imc_pci_ids) {
1053                 ret = snb_pci2phy_map_init(p->pci_id);
1054                 if (ret == 0)
1055                         return p->driver;
1056         }
1057         return NULL;
1058 }
1059
1060 static int imc_uncore_pci_init(void)
1061 {
1062         struct pci_driver *imc_drv = imc_uncore_find_dev();
1063
1064         if (!imc_drv)
1065                 return -ENODEV;
1066
1067         uncore_pci_uncores = snb_pci_uncores;
1068         uncore_pci_driver = imc_drv;
1069
1070         return 0;
1071 }
1072
/*
 * Per-generation PCI init entry points called from the uncore core.
 * All client parts share the same IMC implementation, so each simply
 * defers to the common probe above.
 */
int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
1096
1097 /* end of Sandy Bridge uncore support */
1098
1099 /* Nehalem uncore support */
/* Stop all Nehalem uncore counting by clearing the global control MSR. */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

/*
 * Globally (re-)enable the uncore: set the all-PC enable bits and the
 * fixed-counter enable bit in the global control MSR.
 */
static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}
1109
1110 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1111 {
1112         struct hw_perf_event *hwc = &event->hw;
1113
1114         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1115                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1116         else
1117                 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
1118 }
1119
/*
 * sysfs "format" attributes describing the Nehalem uncore raw event
 * encoding (cmask is 8 bits wide here, per format_attr_cmask8).
 */
static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static const struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};
1133
/*
 * Named event aliases exported via sysfs: uncore clockticks plus the
 * QMC (memory controller) and QHL (home logic) request events.
 */
static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};
1146
/* MSR access ops for the Nehalem uncore box (reuses the SNB disable path). */
static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

/*
 * The single Nehalem uncore box: 8 general-purpose 48-bit counters plus
 * one 48-bit fixed counter.
 * NOTE(review): .name is empty — presumably so the PMU is exposed as
 * plain "uncore" without a type suffix; confirm against the core's
 * PMU-naming code.
 */
static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

/* NULL-terminated list of MSR-based uncore types for Nehalem. */
static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};
1175
/* Publish the Nehalem MSR uncore types to the core uncore code. */
void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}
1180
1181 /* end of Nehalem uncore support */
1182
1183 /* Tiger Lake MMIO uncore support */
1184
/*
 * Tiger Lake IMC PCI IDs. Unlike the tables above, this one is not used
 * for PCI probing: tgl_uncore_get_mc_dev() scans it to locate the host
 * bridge whose MCHBAR holds the MMIO free-running counters.
 */
static const struct pci_device_id tgl_uncore_pci_ids[] = {
	{ /* IMC (TGL-U) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC (TGL-U) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC (TGL-U) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC (TGL-U) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U4_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC (TGL-H) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ }
};
1208
/* Free-running IMC counter types: total, read and write data traffic. */
enum perf_tgl_uncore_imc_freerunning_types {
	TGL_MMIO_UNCORE_IMC_DATA_TOTAL,
	TGL_MMIO_UNCORE_IMC_DATA_READ,
	TGL_MMIO_UNCORE_IMC_DATA_WRITE,
	TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

/*
 * MCHBAR offsets of the free-running counters for TGL-L (low-power)
 * parts. Initializers follow struct freerunning_counters — presumably
 * { counter base, counter offset, box offset, #counters, bit width };
 * verify against uncore.h.
 */
static struct freerunning_counters tgl_l_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x5040, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x5058, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0x50A0, 0x0, 0x0, 1, 64 },
};

/* Same counters at the offsets used by the other TGL parts. */
static struct freerunning_counters tgl_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0xd840, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0xd858, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xd8A0, 0x0, 0x0, 1, 64 },
};
1227
/*
 * Event aliases for the free-running IMC counters. The scale
 * 6.103515625e-5 equals 64 / 2^20, i.e. each count presumably
 * represents one 64-byte line, reported in MiB.
 */
static struct uncore_event_desc tgl_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_total,         "event=0xff,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(data_total.scale,   "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_total.unit,    "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_read,         "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(data_read.scale,   "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_read.unit,    "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_write,        "event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(data_write.scale,  "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_write.unit,   "MiB"),

	{ /* end: all zeroes */ }
};
1243
1244 static struct pci_dev *tgl_uncore_get_mc_dev(void)
1245 {
1246         const struct pci_device_id *ids = tgl_uncore_pci_ids;
1247         struct pci_dev *mc_dev = NULL;
1248
1249         while (ids && ids->vendor) {
1250                 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, ids->device, NULL);
1251                 if (mc_dev)
1252                         return mc_dev;
1253                 ids++;
1254         }
1255
1256         return mc_dev;
1257 }
1258
/* Per-box stride within MCHBAR (multiplied by pmu_idx in init_box). */
#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET		0x10000
/* Size of the MMIO window ioremap'd for each IMC box. */
#define TGL_UNCORE_PCI_IMC_MAP_SIZE		0xe000
1261
1262 static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
1263 {
1264         struct pci_dev *pdev = tgl_uncore_get_mc_dev();
1265         struct intel_uncore_pmu *pmu = box->pmu;
1266         struct intel_uncore_type *type = pmu->type;
1267         resource_size_t addr;
1268         u32 mch_bar;
1269
1270         if (!pdev) {
1271                 pr_warn("perf uncore: Cannot find matched IMC device.\n");
1272                 return;
1273         }
1274
1275         pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET, &mch_bar);
1276         /* MCHBAR is disabled */
1277         if (!(mch_bar & BIT(0))) {
1278                 pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n");
1279                 return;
1280         }
1281         mch_bar &= ~BIT(0);
1282         addr = (resource_size_t)(mch_bar + TGL_UNCORE_MMIO_IMC_MEM_OFFSET * pmu->pmu_idx);
1283
1284 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1285         pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET + 4, &mch_bar);
1286         addr |= ((resource_size_t)mch_bar << 32);
1287 #endif
1288
1289         box->io_addr = ioremap(addr, type->mmio_map_size);
1290         if (!box->io_addr)
1291                 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
1292 }
1293
/* MMIO ops for the free-running IMC counters (no enable/disable: they
 * cannot be stopped, hence only init/exit/read and hw_config). */
static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
	.init_box	= tgl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

/* sysfs "format" attributes: only event and umask are meaningful here. */
static struct attribute *tgl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL
};

static const struct attribute_group tgl_uncore_imc_format_group = {
	.name = "format",
	.attrs = tgl_uncore_imc_formats_attr,
};
1311
/*
 * Tiger Lake free-running IMC uncore type: 3 counters (total/read/write)
 * in each of 2 boxes (one per memory controller, per num_boxes).
 * .freerunning defaults to the non-L table and is overridden for TGL-L
 * in tgl_l_uncore_mmio_init().
 */
static struct intel_uncore_type tgl_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= TGL_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning		= tgl_uncore_imc_freerunning,
	.ops			= &tgl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};

/* NULL-terminated list of MMIO uncore types for Tiger Lake. */
static struct intel_uncore_type *tgl_mmio_uncores[] = {
	&tgl_uncore_imc_free_running,
	NULL
};
1328
/* TGL-L entry point: swap in the low-power counter offsets, then publish. */
void tgl_l_uncore_mmio_init(void)
{
	tgl_uncore_imc_free_running.freerunning = tgl_l_uncore_imc_freerunning;
	uncore_mmio_uncores = tgl_mmio_uncores;
}

/* Publish the Tiger Lake MMIO uncore types to the core uncore code. */
void tgl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = tgl_mmio_uncores;
}
1339
1340 /* end of Tiger Lake MMIO uncore support */