// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"
/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)
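/*
 * Illustrative note: a raw uncore event is encoded as ev_sel | (umask << 8)
 * with optional edge/invert/threshold bits, so e.g.
 * "perf stat -e uncore_imc_0/event=0x4,umask=0x3/" lands in the EV_SEL and
 * UMASK fields masked above.
 */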
/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}
/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
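/*
 * Example (illustrative): the Cbox code below packs five 6-bit reference
 * counts into one atomic_t, so __BITS_VALUE(atomic_read(&er->ref), 2, 6)
 * extracts bits [17:12], the use count of filter field 2.
 */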
/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP Cbo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10

#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715
/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
					 SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
 * that BIOS programmed. MSR has package scope.
 * |  Bit  |  Default  |  Description
 * | [63]  |    00h    | VALID - When set, indicates the CPU bus
 *                       numbers have been initialized. (RO)
 * |[62:48]|    ---    | Reserved
 * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
 *                       CPUBUSNO(5). (RO)
 * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
 *                       CPUBUSNO(4). (RO)
 * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
 *                       CPUBUSNO(3). (RO)
 * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
 *                       CPUBUSNO(2). (RO)
 * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
 *                       CPUBUSNO(1). (RO)
 * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
 *                       CPUBUSNO(0). (RO)
 */
#define SKX_MSR_CPU_BUS_NUMBER		0x300
#define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
#define BUS_NUM_STRIDE			8
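/*
 * Illustrative decode: bus number n is therefore
 * (msr_val >> (n * BUS_NUM_STRIDE)) & 0xff, and the fields are only
 * meaningful when SKX_MSR_CPU_BUS_VALID_BIT is set in msr_val.
 */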
/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258
/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0		0x1f98
#define SNR_U_MSR_PMON_CTL0		0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL	0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR	0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT	0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0		0x1c01
#define SNR_CHA_MSR_PMON_CTR0		0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL	0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0	0x1c05

#define SNR_CHA_MSR_PMON_BOX_FILTER_TID	0x3ff

/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0		0x1e08
#define SNR_IIO_MSR_PMON_CTR0		0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL	0x1e00
#define SNR_IIO_MSR_OFFSET		0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT	0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0		0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0		0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL	0x1ea0
#define SNR_IRP_MSR_OFFSET		0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0	0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0	0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL	0x1e50
#define SNR_M2PCIE_MSR_OFFSET		0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0		0x1ef1
#define SNR_PCU_MSR_PMON_CTR0		0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL	0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER	0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0		0x468
#define SNR_M2M_PCI_PMON_CTR0		0x440
#define SNR_M2M_PCI_PMON_BOX_CTL	0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT	0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0		0x508
#define SNR_PCIE3_PCI_PMON_CTR0		0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL	0x4e0

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL	0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR	0x38
#define SNR_IMC_MMIO_PMON_CTL0		0x40
#define SNR_IMC_MMIO_PMON_CTR0		0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL	0x22800
#define SNR_IMC_MMIO_OFFSET		0x4000
#define SNR_IMC_MMIO_SIZE		0x4000
#define SNR_IMC_MMIO_BASE_OFFSET	0xd0
#define SNR_IMC_MMIO_BASE_MASK		0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET	0xd8
#define SNR_IMC_MMIO_MEM0_MASK		0x7FF
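/*
 * Illustrative note: unlike the MSR/PCI boxes, the SNR IMC PMON registers
 * are MMIO. The mapping base is derived from two PCI config words, roughly
 * ((BASE_reg & SNR_IMC_MMIO_BASE_MASK) << 23) +
 * ((MEM0_reg & SNR_IMC_MMIO_MEM0_MASK) << 12), as done by the SNR MMIO
 * init code elsewhere in this file (shift values quoted from that code,
 * not defined above).
 */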
/* ICX CHA */
#define ICX_C34_MSR_PMON_CTR0		0xb68
#define ICX_C34_MSR_PMON_CTL0		0xb61
#define ICX_C34_MSR_PMON_BOX_CTL	0xb60
#define ICX_C34_MSR_PMON_BOX_FILTER0	0xb65

/* ICX IIO */
#define ICX_IIO_MSR_PMON_CTL0		0xa58
#define ICX_IIO_MSR_PMON_CTR0		0xa51
#define ICX_IIO_MSR_PMON_BOX_CTL	0xa50

/* ICX IRP */
#define ICX_IRP0_MSR_PMON_CTL0		0xa4d
#define ICX_IRP0_MSR_PMON_CTR0		0xa4b
#define ICX_IRP0_MSR_PMON_BOX_CTL	0xa4a

/* ICX M2PCIE */
#define ICX_M2PCIE_MSR_PMON_CTL0	0xa46
#define ICX_M2PCIE_MSR_PMON_CTR0	0xa41
#define ICX_M2PCIE_MSR_PMON_BOX_CTL	0xa40

/* ICX UPI */
#define ICX_UPI_PCI_PMON_CTL0		0x350
#define ICX_UPI_PCI_PMON_CTR0		0x320
#define ICX_UPI_PCI_PMON_BOX_CTL	0x318
#define ICX_UPI_CTL_UMASK_EXT		0xffffff

/* ICX M3UPI */
#define ICX_M3UPI_PCI_PMON_CTL0		0xd8
#define ICX_M3UPI_PCI_PMON_CTR0		0xa8
#define ICX_M3UPI_PCI_PMON_BOX_CTL	0xa0

/* ICX IMC */
#define ICX_NUMBER_IMC_CHN		2
#define ICX_IMC_MEM_STRIDE		0x4
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
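/*
 * Each DEFINE_UNCORE_FORMAT_ATTR() above becomes a sysfs "format" file,
 * e.g. /sys/bus/event_source/devices/uncore_cbox_0/format/event, telling
 * perf where the named field lives inside config/config1/config2.
 */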
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}
static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}
static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}
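/*
 * Note: the counter below is read as two 32-bit config-space accesses into
 * the halves of a u64; the read is not atomic with respect to a carry
 * between the low and high words.
 */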
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}
static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}
static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}
static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}
static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
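/*
 * The scale 6.103515625e-5 is 64 / 2^20: each CAS event transfers one
 * 64-byte cache line, so multiplying the raw count by it reports MiB.
 */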
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
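/*
 * The shared PCU filter MSR holds four one-byte occupancy-band thresholds.
 * When a band's byte is already claimed with a different value,
 * snbep_pcu_alter_er() below shifts the event's filter byte to another
 * band position and retargets the event select accordingly.
 */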
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}
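/*
 * PCU events 0xb-0xe are the four frequency-band cycle counters; each takes
 * its threshold from the matching byte of the shared filter MSR, hence
 * filter byte index = ev_sel - 0xb below.
 */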
static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
#define NODE_ID_MASK	0x7

/*
 * build pci bus to socket mapping
 */
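/*
 * Illustrative decode: the GIDNIDMAP register packs eight 3-bit node IDs,
 * so group i maps to node ((config >> (3 * i)) & 0x7); the loop below
 * searches for the group whose node ID matches this socket's CPUNODEID.
 */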
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment, die_id;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/*
		 * The nodeid and idmap registers only contain enough
		 * information to handle 8 nodes.  On systems with more
		 * than 8 nodes, we need to rely on NUMA information,
		 * filled in from BIOS supplied information, to determine
		 * the topology.
		 */
		if (nr_node_ids <= 8) {
			/* get the Node ID of the local register */
			err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
			if (err)
				break;
			nodeid = config & NODE_ID_MASK;
			/* get the Node ID mapping */
			err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
			if (err)
				break;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			/*
			 * every three bits in the Node ID mapping register maps
			 * to a particular node.
			 */
			for (i = 0; i < 8; i++) {
				if (nodeid == ((config >> (3 * i)) & 0x7)) {
					if (topology_max_die_per_package() > 1)
						die_id = i;
					else
						die_id = topology_phys_to_logical_pkg(i);
					if (die_id < 0)
						die_id = -ENODEV;
					map->pbus_to_dieid[bus] = die_id;
					break;
				}
			}
			raw_spin_unlock(&pci2phy_map_lock);
		} else {
			int node = pcibus_to_node(ubox_dev->bus);
			int cpu;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			die_id = -1;
			for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
				struct cpuinfo_x86 *c = &cpu_data(cpu);

				if (c->initialized && cpu_to_node(cpu) == node) {
					map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
					break;
				}
			}
			raw_spin_unlock(&pci2phy_map_lock);

			if (WARN_ON_ONCE(die_id == -1)) {
				err = -EINVAL;
				break;
			}
		}
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	if (msr)
		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}

static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
static u64 ivbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}

	return mask;
}
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
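/*
 * Note: on IvyTown the 64-bit Cbox filter value is split across two MSRs;
 * the low half goes to the FILTER MSR (reg1->reg) and the high half to
 * FILTER1, which sits six MSRs above it - hence the "reg1->reg + 6" below.
 */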
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
1802 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1809 void ivbep_uncore_cpu_init(void)
1811 if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1812 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1813 uncore_msr_uncores = ivbep_msr_uncores;

static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};

static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
}

static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
}
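
/*
 * IRP counters are 64 bits wide but live in PCI config space, so the value
 * is assembled from two 32-bit reads.
 */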
static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};

static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};

static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]		= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]		= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]		= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]		= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};

int ivbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = ivbep_pci_uncores;
	uncore_pci_driver = &ivbep_uncore_pci_driver;
	return 0;
}
/* end of IvyTown uncore support */

/* KNL uncore support */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};

static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};

static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};

static u64 knl_cha_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x4)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
	return mask;
}

static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}

static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}

static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};

static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};

static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};

static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};

void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}

static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
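
/*
 * The fixed counter uses its own enable bit; everything else takes the
 * common SNB-EP enable bit.
 */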
static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
							== UNCORE_FIXED_EVENT)
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | KNL_PMON_FIXED_CTL_EN);
	else
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};

static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};

static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};

enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};

/*
 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
 * device type. Prior to KNL, each instance of a PMU device type had a unique
 * device ID.
 *
 *	PCI Device ID	Uncore PMU Devices
 *	----------------------------------
 *	0x7841		MC0 UClk, MC1 UClk
 *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
 *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
 *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
 *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
 *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
 *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
 *	0x7817		M2PCIe
 *	0x7814		IRP
 */

static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};

int knl_uncore_pci_init(void)
{
	int ret;

	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
	ret = snb_pci2phy_map_init(0x7814); /* IRP */
	if (ret)
		return ret;
	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
	if (ret)
		return ret;
	uncore_pci_uncores = knl_pci_uncores;
	uncore_pci_driver = &knl_uncore_pci_driver;
	return 0;
}
/* end of KNL uncore support */

/* Haswell-EP uncore support */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};

static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
	reg1->idx = 0;
	return 0;
}

static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};

static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};

static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
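
/*
 * As on IvyTown, convert the filter fields selected in extra_reg::idx into
 * the valid bits of the Haswell-EP Cbo filter registers.
 */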
static u64 hswep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}
	return mask;
}

static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}

static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};

/*
 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}

static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};

static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};

static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
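
/*
 * PCU events 0xb-0xe count occupancy in one of four frequency bands; the
 * band thresholds come from config1, one byte per band.
 */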
static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (8 * reg1->idx));
	}
	return 0;
}

static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};

#define HSWEP_PCU_DID			0x2fc0
#define HSWEP_PCU_CAPID4_OFFSET		0x94
#define hswep_get_chop(_cap)		(((_cap) >> 6) & 0x3)
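
/*
 * Read the CAPID4 "chop" bits from the PCU PCI device: a zero chop value
 * identifies parts where only two SBOXes are usable.
 */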
static bool hswep_has_limit_sbox(unsigned int device)
{
	struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	u32 capid4;

	if (!dev)
		return false;

	pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFSET, &capid4);
	pci_dev_put(dev);	/* drop the reference taken by pci_get_device() */
	if (!hswep_get_chop(capid4))
		return true;

	return false;
}

void hswep_uncore_cpu_init(void)
{
	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (hswep_has_limit_sbox(HSWEP_PCU_DID))
		hswep_uncore_sbox.num_boxes = 2;

	uncore_msr_uncores = hswep_msr_uncores;
}

static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};

static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};

static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]		= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]		= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]		= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]		= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};

int hswep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = hswep_pci_uncores;
	uncore_pci_driver = &hswep_uncore_pci_driver;
	return 0;
}
/* end of Haswell-EP uncore support */

/* BDX uncore support */

static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};

static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};

static struct intel_uncore_type bdx_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};

#define BDX_MSR_UNCORE_SBOX	3

static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};

/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};

#define BDX_PCU_DID			0x6fc0

void bdx_uncore_cpu_init(void)
{
	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = bdx_msr_uncores;

	/* Detect systems with no SBOXes */
	if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;

	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}

static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* QPI Port 2 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   BDX_PCI_QPI_PORT2_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};

int bdx_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = bdx_pci_uncores;
	uncore_pci_driver = &bdx_uncore_pci_driver;
	return 0;
}
/* end of BDX uncore support */

/* SKX uncore support */

static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};

static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};

static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
	EVENT_EXTRA_END
};
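
/*
 * Convert the filter fields selected in extra_reg::idx into the valid bits
 * of the SKX CHA filter registers; bit 3 pulls in the whole opcode/locality
 * filter group at once.
 */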
static u64 skx_cha_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8) {
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
	}
	return mask;
}

static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}

static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};

static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};

static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};

static void skx_iio_enable_event(struct intel_uncore_box *box,
				 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};
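
/*
 * Each IIO PMU maps to one PCIe stack; the per-die bus-number configuration
 * packs one root-bus number per stack, BUS_NUM_STRIDE bits apart.
 */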
static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
{
	return pmu->type->topology[die].configuration >>
		(pmu->pmu_idx * BUS_NUM_STRIDE);
}

static umode_t
skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));

	/* Root bus 0x00 is valid only for die 0 AND pmu_idx = 0. */
	return (!skx_iio_stack(pmu, die) && pmu->pmu_idx) ? 0 : attr->mode;
}

static ssize_t skx_iio_mapping_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
	long die = (long)ea->var;

	return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment,
					   skx_iio_stack(pmu, die));
}
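
/* Read the per-package bus-number MSR; reject it if the valid bit is clear. */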
static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
{
	u64 msr_value;

	if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
	    !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
		return -ENXIO;

	*topology = msr_value;

	return 0;
}

static int die_to_cpu(int die)
{
	int res = 0, cpu, current_die;
	/*
	 * Using cpus_read_lock() to ensure cpu is not going down between
	 * looking at cpu_online_mask.
	 */
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		current_die = topology_logical_die_id(cpu);
		if (current_die == die) {
			res = cpu;
			break;
		}
	}
	cpus_read_unlock();

	return res;
}
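
/*
 * Build the IIO topology table: one bus-number configuration and one PCI
 * segment per die.
 */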
static int skx_iio_get_topology(struct intel_uncore_type *type)
{
	int die, ret = -EPERM;

	type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
				 GFP_KERNEL);
	if (!type->topology)
		return -ENOMEM;

	for (die = 0; die < uncore_max_dies(); die++) {
		ret = skx_msr_cpu_bus_read(die_to_cpu(die),
					   &type->topology[die].configuration);
		if (ret)
			break;

		ret = uncore_die_to_segment(die);
		if (ret < 0)
			break;

		type->topology[die].segment = ret;
	}

	if (ret < 0) {
		kfree(type->topology);
		type->topology = NULL;
	}

	return ret;
}
static struct attribute_group skx_iio_mapping_group = {
	.is_visible = skx_iio_mapping_visible,
};

static const struct attribute_group *skx_iio_attr_update[] = {
	&skx_iio_mapping_group,
	NULL,
};

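/*
 * Build one "dieN" sysfs attribute per die for the IIO PMU. Each
 * attribute prints "segment:bus" (see skx_iio_mapping_show above), which
 * lets user space associate an IIO PMON instance with the PCIe root bus
 * it monitors.
 */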
static int skx_iio_set_mapping(struct intel_uncore_type *type)
{
	char buf[64];
	int ret;
	long die = -1;
	struct attribute **attrs = NULL;
	struct dev_ext_attribute *eas = NULL;

	ret = skx_iio_get_topology(type);
	if (ret < 0)
		goto clear_attr_update;

	ret = -ENOMEM;

	/* One more for NULL. */
	attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		goto err;

	eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
	if (!eas)
		goto err;

	for (die = 0; die < uncore_max_dies(); die++) {
		sprintf(buf, "die%ld", die);
		sysfs_attr_init(&eas[die].attr.attr);
		eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
		if (!eas[die].attr.attr.name)
			goto err;
		eas[die].attr.attr.mode = 0444;
		eas[die].attr.show = skx_iio_mapping_show;
		eas[die].attr.store = NULL;
		eas[die].var = (void *)die;
		attrs[die] = &eas[die].attr.attr;
	}
	skx_iio_mapping_group.attrs = attrs;

	return 0;
err:
	for (; die >= 0; die--)
		kfree(eas[die].attr.attr.name);
	kfree(eas);
	kfree(attrs);
	kfree(type->topology);
clear_attr_update:
	type->attr_update = NULL;
	return ret;
}

static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
{
	struct attribute **attr = skx_iio_mapping_group.attrs;

	if (!attr)
		return;

	for (; *attr; attr++)
		kfree((*attr)->name);
	kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
	kfree(skx_iio_mapping_group.attrs);
	skx_iio_mapping_group.attrs = NULL;
	kfree(type->topology);
}

static struct intel_uncore_type skx_uncore_iio = {
	.name = "iio",
	.num_counters = 4,
	.num_boxes = 6,
	.perf_ctr_bits = 48,
	.event_ctl = SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr = SKX_IIO0_MSR_PMON_CTR0,
	.event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl = SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset = SKX_IIO_MSR_OFFSET,
	.constraints = skx_uncore_iio_constraints,
	.ops = &skx_uncore_iio_ops,
	.format_group = &skx_uncore_iio_format_group,
	.attr_update = skx_iio_attr_update,
	.set_mapping = skx_iio_set_mapping,
	.cleanup_mapping = skx_iio_cleanup_mapping,
};

enum perf_uncore_iio_freerunning_type_id {
	SKX_IIO_MSR_IOCLK = 0,
	SKX_IIO_MSR_BW = 1,
	SKX_IIO_MSR_UTIL = 2,

	SKX_IIO_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters skx_iio_freerunning[] = {
	[SKX_IIO_MSR_IOCLK] = { 0xa45, 0x1, 0x20, 1, 36 },
	[SKX_IIO_MSR_BW] = { 0xb00, 0x1, 0x10, 8, 36 },
	[SKX_IIO_MSR_UTIL] = { 0xb08, 0x1, 0x10, 8, 36 },
};

static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0, "event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0, "event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1, "event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1, "event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2, "event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2, "event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3, "event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3, "event=0xff,umask=0x37"),
	{ /* end: all zeroes */ },
};

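/*
 * Free-running counters can only be read: they cannot be frozen, reset
 * or reprogrammed, so the ops below supply no enable/disable callbacks,
 * just a counter read and an hw_config to validate the event encoding.
 */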
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
	.read_counter = uncore_msr_read_counter,
	.hw_config = uncore_freerunning_hw_config,
};

static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};

static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name = "iio_free_running",
	.num_counters = 17,
	.num_boxes = 6,
	.num_freerunning_types = SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning = skx_iio_freerunning,
	.ops = &skx_uncore_iio_freerunning_ops,
	.event_descs = skx_uncore_iio_freerunning_events,
	.format_group = &skx_uncore_iio_freerunning_format_group,
};

static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};

static struct intel_uncore_type skx_uncore_irp = {
	.name = "irp",
	.num_counters = 2,
	.num_boxes = 6,
	.perf_ctr_bits = 48,
	.event_ctl = SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr = SKX_IRP0_MSR_PMON_CTR0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl = SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset = SKX_IRP_MSR_OFFSET,
	.ops = &skx_uncore_iio_ops,
	.format_group = &skx_uncore_format_group,
};

static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute_group skx_uncore_pcu_format_group = {
	.name = "format",
	.attrs = skx_uncore_pcu_formats_attr,
};

static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config = hswep_pcu_hw_config,
	.get_constraint = snbep_pcu_get_constraint,
	.put_constraint = snbep_pcu_put_constraint,
};

static struct intel_uncore_type skx_uncore_pcu = {
	.name = "pcu",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops = &skx_uncore_pcu_ops,
	.format_group = &skx_uncore_pcu_format_group,
};

static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_iio_free_running,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};

/*
 * To determine the number of CHAs, read bits 27:0 in the CAPID6
 * register, which is located at Device 30, Function 3, Offset 0x9C
 * (PCI device ID 0x2083).
 */
#define SKX_CAPID6 0x9c
#define SKX_CHA_BIT_MASK GENMASK(27, 0)

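/*
 * Each set bit in CAPID6[27:0] marks one enabled CHA, so the population
 * count of the masked capability word is the CHA box count.
 */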
static int skx_count_chabox(void)
{
	struct pci_dev *dev = NULL;
	u32 val = 0;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
	if (!dev)
		goto out;

	pci_read_config_dword(dev, SKX_CAPID6, &val);
	val &= SKX_CHA_BIT_MASK;
out:
	pci_dev_put(dev);
	return hweight32(val);
}

void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}

static struct intel_uncore_type skx_uncore_imc = {
	.name = "imc",
	.num_counters = 4,
	.num_boxes = 6,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs = hswep_uncore_imc_events,
	.perf_ctr = SNBEP_PCI_PMON_CTR0,
	.event_ctl = SNBEP_PCI_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
	.ops = &ivbep_uncore_pci_ops,
	.format_group = &skx_uncore_format_group,
};

static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};

static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box = skx_upi_uncore_pci_init_box,
	.disable_box = snbep_uncore_pci_disable_box,
	.enable_box = snbep_uncore_pci_enable_box,
	.disable_event = snbep_uncore_pci_disable_event,
	.enable_event = snbep_uncore_pci_enable_event,
	.read_counter = snbep_uncore_pci_read_counter,
};

static struct intel_uncore_type skx_uncore_upi = {
	.name = "upi",
	.num_counters = 4,
	.num_boxes = 3,
	.perf_ctr_bits = 48,
	.perf_ctr = SKX_UPI_PCI_PMON_CTR0,
	.event_ctl = SKX_UPI_PCI_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
	.box_ctl = SKX_UPI_PCI_PMON_BOX_CTL,
	.ops = &skx_upi_uncore_pci_ops,
	.format_group = &skx_upi_uncore_format_group,
};

static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box = skx_m2m_uncore_pci_init_box,
	.disable_box = snbep_uncore_pci_disable_box,
	.enable_box = snbep_uncore_pci_enable_box,
	.disable_event = snbep_uncore_pci_disable_event,
	.enable_event = snbep_uncore_pci_enable_event,
	.read_counter = snbep_uncore_pci_read_counter,
};

static struct intel_uncore_type skx_uncore_m2m = {
	.name = "m2m",
	.num_counters = 4,
	.num_boxes = 2,
	.perf_ctr_bits = 48,
	.perf_ctr = SKX_M2M_PCI_PMON_CTR0,
	.event_ctl = SKX_M2M_PCI_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl = SKX_M2M_PCI_PMON_BOX_CTL,
	.ops = &skx_m2m_uncore_pci_ops,
	.format_group = &skx_uncore_format_group,
};

static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type skx_uncore_m2pcie = {
	.name = "m2pcie",
	.num_counters = 4,
	.num_boxes = 4,
	.perf_ctr_bits = 48,
	.constraints = skx_uncore_m2pcie_constraints,
	.perf_ctr = SNBEP_PCI_PMON_CTR0,
	.event_ctl = SNBEP_PCI_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
	.ops = &ivbep_uncore_pci_ops,
	.format_group = &skx_uncore_format_group,
};

static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type skx_uncore_m3upi = {
	.name = "m3upi",
	.num_counters = 3,
	.num_boxes = 3,
	.perf_ctr_bits = 48,
	.constraints = skx_uncore_m3upi_constraints,
	.perf_ctr = SNBEP_PCI_PMON_CTR0,
	.event_ctl = SNBEP_PCI_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
	.ops = &ivbep_uncore_pci_ops,
	.format_group = &skx_uncore_format_group,
};

enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC] = &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M] = &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI] = &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI] = &skx_uncore_m3upi,
	NULL,
};

static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver skx_uncore_pci_driver = {
	.name = "skx_uncore",
	.id_table = skx_uncore_pci_ids,
};

int skx_uncore_pci_init(void)
{
	/* need to double check pci address */
	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);

	if (ret)
		return ret;

	uncore_pci_uncores = skx_pci_uncores;
	uncore_pci_driver = &skx_uncore_pci_driver;
	return 0;
}

/* end of SKX uncore support */

/* SNR uncore support */

static struct intel_uncore_type snr_uncore_ubox = {
	.name = "ubox",
	.num_counters = 2,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.perf_ctr = SNR_U_MSR_PMON_CTR0,
	.event_ctl = SNR_U_MSR_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr = SNR_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl = SNR_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops = &ivbep_uncore_msr_ops,
	.format_group = &ivbep_uncore_format_group,
};

static struct attribute *snr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext2.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};

static const struct attribute_group snr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = snr_uncore_cha_formats_attr,
};

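/*
 * The SNR CHA filter registers are spaced with the same stride as the
 * counter MSRs, so the per-box filter address below is computed as
 * FILTER0 plus msr_offset times the box index.
 */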
static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
	reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
	reg1->idx = 0;

	return 0;
}

static void snr_cha_enable_event(struct intel_uncore_box *box,
				 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, reg1->config);

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snr_uncore_chabox_ops = {
	.init_box = ivbep_uncore_msr_init_box,
	.disable_box = snbep_uncore_msr_disable_box,
	.enable_box = snbep_uncore_msr_enable_box,
	.disable_event = snbep_uncore_msr_disable_event,
	.enable_event = snr_cha_enable_event,
	.read_counter = uncore_msr_read_counter,
	.hw_config = snr_cha_hw_config,
};

static struct intel_uncore_type snr_uncore_chabox = {
	.name = "cha",
	.num_counters = 4,
	.num_boxes = 6,
	.perf_ctr_bits = 48,
	.event_ctl = SNR_CHA_MSR_PMON_CTL0,
	.perf_ctr = SNR_CHA_MSR_PMON_CTR0,
	.box_ctl = SNR_CHA_MSR_PMON_BOX_CTL,
	.msr_offset = HSWEP_CBO_MSR_OFFSET,
	.event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
	.ops = &snr_uncore_chabox_ops,
	.format_group = &snr_uncore_chabox_format_group,
};

static struct attribute *snr_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask2.attr,
	&format_attr_fc_mask2.attr,
	NULL,
};

static const struct attribute_group snr_uncore_iio_format_group = {
	.name = "format",
	.attrs = snr_uncore_iio_formats_attr,
};

static struct intel_uncore_type snr_uncore_iio = {
	.name = "iio",
	.num_counters = 4,
	.num_boxes = 5,
	.perf_ctr_bits = 48,
	.event_ctl = SNR_IIO_MSR_PMON_CTL0,
	.perf_ctr = SNR_IIO_MSR_PMON_CTR0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl = SNR_IIO_MSR_PMON_BOX_CTL,
	.msr_offset = SNR_IIO_MSR_OFFSET,
	.ops = &ivbep_uncore_msr_ops,
	.format_group = &snr_uncore_iio_format_group,
};

static struct intel_uncore_type snr_uncore_irp = {
	.name = "irp",
	.num_counters = 2,
	.num_boxes = 5,
	.perf_ctr_bits = 48,
	.event_ctl = SNR_IRP0_MSR_PMON_CTL0,
	.perf_ctr = SNR_IRP0_MSR_PMON_CTR0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl = SNR_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset = SNR_IRP_MSR_OFFSET,
	.ops = &ivbep_uncore_msr_ops,
	.format_group = &ivbep_uncore_format_group,
};

static struct intel_uncore_type snr_uncore_m2pcie = {
	.name = "m2pcie",
	.num_counters = 4,
	.num_boxes = 5,
	.perf_ctr_bits = 48,
	.event_ctl = SNR_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr = SNR_M2PCIE_MSR_PMON_CTR0,
	.box_ctl = SNR_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offset = SNR_M2PCIE_MSR_OFFSET,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.ops = &ivbep_uncore_msr_ops,
	.format_group = &ivbep_uncore_format_group,
};

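/*
 * PCU occupancy events 0xb-0xe carry a band filter in config1; the
 * event select relative to 0xb picks which filter field of
 * SNR_PCU_MSR_PMON_BOX_FILTER is programmed.
 */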
static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}

	return 0;
}

static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config = snr_pcu_hw_config,
	.get_constraint = snbep_pcu_get_constraint,
	.put_constraint = snbep_pcu_put_constraint,
};

static struct intel_uncore_type snr_uncore_pcu = {
	.name = "pcu",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.perf_ctr = SNR_PCU_MSR_PMON_CTR0,
	.event_ctl = SNR_PCU_MSR_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl = SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops = &snr_uncore_pcu_ops,
	.format_group = &skx_uncore_pcu_format_group,
};

enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK] = { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN] = { 0x1f00, 0x1, 0x10, 8, 48 },
};

static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name = "iio_free_running",
	.num_counters = 9,
	.num_boxes = 5,
	.num_freerunning_types = SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning = snr_iio_freerunning,
	.ops = &skx_uncore_iio_freerunning_ops,
	.event_descs = snr_uncore_iio_freerunning_events,
	.format_group = &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};

void snr_uncore_cpu_init(void)
{
	uncore_msr_uncores = snr_msr_uncores;
}

static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box = snr_m2m_uncore_pci_init_box,
	.disable_box = snbep_uncore_pci_disable_box,
	.enable_box = snbep_uncore_pci_enable_box,
	.disable_event = snbep_uncore_pci_disable_event,
	.enable_event = snbep_uncore_pci_enable_event,
	.read_counter = snbep_uncore_pci_read_counter,
};

static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};

static struct intel_uncore_type snr_uncore_m2m = {
	.name = "m2m",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.perf_ctr = SNR_M2M_PCI_PMON_CTR0,
	.event_ctl = SNR_M2M_PCI_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
	.ops = &snr_m2m_uncore_pci_ops,
	.format_group = &snr_m2m_uncore_format_group,
};

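/*
 * The PCIe3 event control registers are wider than 32 bits but sit in
 * PCI config space, so the configuration is written as two 32-bit
 * accesses: the low word together with the enable bit, then the
 * remaining high bits.
 */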
static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
}

static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
	.init_box = snr_m2m_uncore_pci_init_box,
	.disable_box = snbep_uncore_pci_disable_box,
	.enable_box = snbep_uncore_pci_enable_box,
	.disable_event = snbep_uncore_pci_disable_event,
	.enable_event = snr_uncore_pci_enable_event,
	.read_counter = snbep_uncore_pci_read_counter,
};

static struct intel_uncore_type snr_uncore_pcie3 = {
	.name = "pcie3",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.perf_ctr = SNR_PCIE3_PCI_PMON_CTR0,
	.event_ctl = SNR_PCIE3_PCI_PMON_CTL0,
	.event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL,
	.ops = &snr_pcie3_uncore_pci_ops,
	.format_group = &skx_uncore_iio_format_group,
};

enum {
	SNR_PCI_UNCORE_M2M,
	SNR_PCI_UNCORE_PCIE3,
};

static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m,
	[SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3,
	NULL,
};

static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_driver = {
	.name = "snr_uncore",
	.id_table = snr_uncore_pci_ids,
};

static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
	{ /* PCIe3 RP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_sub_driver = {
	.name = "snr_uncore_sub",
	.id_table = snr_uncore_pci_sub_ids,
};

int snr_uncore_pci_init(void)
{
	/* SNR UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = snr_pci_uncores;
	uncore_pci_driver = &snr_uncore_pci_driver;
	uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
	return 0;
}

static struct pci_dev *snr_uncore_get_mc_dev(int id)
{
	struct pci_dev *mc_dev = NULL;
	int pkg;

	while (1) {
		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
		if (!mc_dev)
			break;
		pkg = uncore_pcibus_to_dieid(mc_dev->bus);
		if (pkg == id)
			break;
	}
	return mc_dev;
}

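/*
 * The IMC PMON registers live behind an MMIO window. Its physical
 * address is assembled from two config registers of the MC device found
 * above: SNR_IMC_MMIO_BASE_OFFSET supplies the address bits from bit 23
 * upward, the MEMn offset register the bits from bit 12 upward, and
 * box_ctl is the offset of this box inside the mapped window.
 */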
static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
				       unsigned int box_ctl, int mem_offset)
{
	struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
	struct intel_uncore_type *type = box->pmu->type;
	resource_size_t addr;
	u32 pci_dword;

	if (!pdev)
		return;

	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
	addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;

	pci_read_config_dword(pdev, mem_offset, &pci_dword);
	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;

	addr += box_ctl;

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr) {
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
		return;
	}

	writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
}

static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
				   SNR_IMC_MMIO_MEM0_OFFSET);
}

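/*
 * Boxes behind MMIO are frozen and unfrozen with a read-modify-write of
 * the FRZ bit in the box control register at the start of the mapping.
 */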
static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	u32 config;

	if (!box->io_addr)
		return;

	config = readl(box->io_addr);
	config |= SNBEP_PMON_BOX_CTL_FRZ;
	writel(config, box->io_addr);
}

static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	u32 config;

	if (!box->io_addr)
		return;

	config = readl(box->io_addr);
	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
	writel(config, box->io_addr);
}

static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
		return;

	writel(hwc->config | SNBEP_PMON_CTL_EN,
	       box->io_addr + hwc->config_base);
}

static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
					  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
		return;

	writel(hwc->config, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box = snr_uncore_mmio_init_box,
	.exit_box = uncore_mmio_exit_box,
	.disable_box = snr_uncore_mmio_disable_box,
	.enable_box = snr_uncore_mmio_enable_box,
	.disable_event = snr_uncore_mmio_disable_event,
	.enable_event = snr_uncore_mmio_enable_event,
	.read_counter = uncore_mmio_read_counter,
};

static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type snr_uncore_imc = {
	.name = "imc",
	.num_counters = 4,
	.num_boxes = 2,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs = snr_uncore_imc_events,
	.perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl = SNR_IMC_MMIO_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset = SNR_IMC_MMIO_OFFSET,
	.mmio_map_size = SNR_IMC_MMIO_SIZE,
	.ops = &snr_uncore_mmio_ops,
	.format_group = &skx_uncore_format_group,
};

enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box = snr_uncore_mmio_init_box,
	.exit_box = uncore_mmio_exit_box,
	.read_counter = uncore_mmio_read_counter,
	.hw_config = uncore_freerunning_hw_config,
};

static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name = "imc_free_running",
	.num_counters = 3,
	.num_boxes = 1,
	.num_freerunning_types = SNR_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size = SNR_IMC_MMIO_SIZE,
	.freerunning = snr_imc_freerunning,
	.ops = &snr_uncore_imc_freerunning_ops,
	.event_descs = snr_uncore_imc_freerunning_events,
	.format_group = &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};

void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}

/* end of SNR uncore support */

/* ICX uncore support */

static unsigned icx_cha_msr_offsets[] = {
	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0, 0xe,
	0x1c, 0x2a, 0x38, 0x46,
};

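/*
 * The ICX CHA MSR blocks are not evenly spaced, so a per-box offset
 * table replaces the single msr_offset stride used on earlier parts
 * (note the wrap to low offsets for the last few boxes).
 */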
static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);

	if (tie_en) {
		reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
			    icx_cha_msr_offsets[box->pmu->pmu_idx];
		reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
		reg1->idx = 0;
	}

	return 0;
}

static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box = ivbep_uncore_msr_init_box,
	.disable_box = snbep_uncore_msr_disable_box,
	.enable_box = snbep_uncore_msr_enable_box,
	.disable_event = snbep_uncore_msr_disable_event,
	.enable_event = snr_cha_enable_event,
	.read_counter = uncore_msr_read_counter,
	.hw_config = icx_cha_hw_config,
};

static struct intel_uncore_type icx_uncore_chabox = {
	.name = "cha",
	.num_counters = 4,
	.perf_ctr_bits = 48,
	.event_ctl = ICX_C34_MSR_PMON_CTL0,
	.perf_ctr = ICX_C34_MSR_PMON_CTR0,
	.box_ctl = ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets = icx_cha_msr_offsets,
	.event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints = skx_uncore_chabox_constraints,
	.ops = &icx_uncore_chabox_ops,
	.format_group = &snr_uncore_chabox_format_group,
};

static unsigned icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_iio = {
	.name = "iio",
	.num_counters = 4,
	.num_boxes = 6,
	.perf_ctr_bits = 48,
	.event_ctl = ICX_IIO_MSR_PMON_CTL0,
	.perf_ctr = ICX_IIO_MSR_PMON_CTR0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl = ICX_IIO_MSR_PMON_BOX_CTL,
	.msr_offsets = icx_msr_offsets,
	.constraints = icx_uncore_iio_constraints,
	.ops = &skx_uncore_iio_ops,
	.format_group = &snr_uncore_iio_format_group,
};

static struct intel_uncore_type icx_uncore_irp = {
	.name = "irp",
	.num_counters = 2,
	.num_boxes = 6,
	.perf_ctr_bits = 48,
	.event_ctl = ICX_IRP0_MSR_PMON_CTL0,
	.perf_ctr = ICX_IRP0_MSR_PMON_CTR0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl = ICX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offsets = icx_msr_offsets,
	.ops = &ivbep_uncore_msr_ops,
	.format_group = &ivbep_uncore_format_group,
};

static struct event_constraint icx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_m2pcie = {
	.name = "m2pcie",
	.num_counters = 4,
	.num_boxes = 6,
	.perf_ctr_bits = 48,
	.event_ctl = ICX_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr = ICX_M2PCIE_MSR_PMON_CTR0,
	.box_ctl = ICX_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offsets = icx_msr_offsets,
	.constraints = icx_uncore_m2pcie_constraints,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.ops = &ivbep_uncore_msr_ops,
	.format_group = &ivbep_uncore_format_group,
};

enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,
	ICX_IIO_MSR_BW_IN,

	ICX_IIO_FREERUNNING_TYPE_MAX,
};

static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};

static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK] = { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};

static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name = "iio_free_running",
	.num_counters = 9,
	.num_boxes = 6,
	.num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning = icx_iio_freerunning,
	.ops = &skx_uncore_iio_freerunning_ops,
	.event_descs = icx_uncore_iio_freerunning_events,
	.format_group = &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};

/*
 * To determine the number of CHAs, read the CAPID6 (low) and CAPID7
 * (high) registers, which are located at Device 30, Function 3.
 */
#define ICX_CAPID6 0x9c
#define ICX_CAPID7 0xa0

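/*
 * The two 32-bit capability words are read into the low and high halves
 * of one u64, so the CHA count is the population count of the combined
 * value.
 */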
static u64 icx_count_chabox(void)
{
	struct pci_dev *dev = NULL;
	u64 caps = 0;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
	if (!dev)
		goto out;

	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
out:
	pci_dev_put(dev);
	return hweight64(caps);
}

void icx_uncore_cpu_init(void)
{
	u64 num_boxes = icx_count_chabox();

	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
		return;
	icx_uncore_chabox.num_boxes = num_boxes;
	uncore_msr_uncores = icx_msr_uncores;
}

static struct intel_uncore_type icx_uncore_m2m = {
	.name = "m2m",
	.num_counters = 4,
	.num_boxes = 4,
	.perf_ctr_bits = 48,
	.perf_ctr = SNR_M2M_PCI_PMON_CTR0,
	.event_ctl = SNR_M2M_PCI_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
	.ops = &snr_m2m_uncore_pci_ops,
	.format_group = &snr_m2m_uncore_format_group,
};

static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};

static struct intel_uncore_type icx_uncore_upi = {
	.name = "upi",
	.num_counters = 4,
	.num_boxes = 3,
	.perf_ctr_bits = 48,
	.perf_ctr = ICX_UPI_PCI_PMON_CTR0,
	.event_ctl = ICX_UPI_PCI_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
	.box_ctl = ICX_UPI_PCI_PMON_BOX_CTL,
	.ops = &skx_upi_uncore_pci_ops,
	.format_group = &icx_upi_uncore_format_group,
};

static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_m3upi = {
	.name = "m3upi",
	.num_counters = 4,
	.num_boxes = 3,
	.perf_ctr_bits = 48,
	.perf_ctr = ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl = ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl = ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints = icx_uncore_m3upi_constraints,
	.ops = &ivbep_uncore_pci_ops,
	.format_group = &skx_uncore_format_group,
};

enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M] = &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI] = &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI] = &icx_uncore_m3upi,
	NULL,
};

static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver icx_uncore_pci_driver = {
	.name = "icx_uncore",
	.id_table = icx_uncore_pci_ids,
};

int icx_uncore_pci_init(void)
{
	/* ICX UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = icx_pci_uncores;
	uncore_pci_driver = &icx_uncore_pci_driver;
	return 0;
}

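/*
 * ICX spreads the IMC PMON blocks over several memory controllers:
 * pmu_idx modulo ICX_NUMBER_IMC_CHN selects the channel within a
 * controller (stride mmio_offset), while the quotient selects the
 * controller's MEMn base register via ICX_IMC_MEM_STRIDE.
 */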
static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
{
	unsigned int box_ctl = box->pmu->type->box_ctl +
			       box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
	int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	__snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
}

static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box = icx_uncore_imc_init_box,
	.exit_box = uncore_mmio_exit_box,
	.disable_box = snr_uncore_mmio_disable_box,
	.enable_box = snr_uncore_mmio_enable_box,
	.disable_event = snr_uncore_mmio_disable_event,
	.enable_event = snr_uncore_mmio_enable_event,
	.read_counter = uncore_mmio_read_counter,
};

static struct intel_uncore_type icx_uncore_imc = {
	.name = "imc",
	.num_counters = 4,
	.num_boxes = 8,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs = hswep_uncore_imc_events,
	.perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl = SNR_IMC_MMIO_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset = SNR_IMC_MMIO_OFFSET,
	.mmio_map_size = SNR_IMC_MMIO_SIZE,
	.ops = &icx_uncore_mmio_ops,
	.format_group = &skx_uncore_format_group,
};

enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT] = { 0x22a0, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
}

static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box = icx_uncore_imc_freerunning_init_box,
	.exit_box = uncore_mmio_exit_box,
	.read_counter = uncore_mmio_read_counter,
	.hw_config = uncore_freerunning_hw_config,
};

static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name = "imc_free_running",
	.num_counters = 5,
	.num_boxes = 4,
	.num_freerunning_types = ICX_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size = SNR_IMC_MMIO_SIZE,
	.freerunning = icx_imc_freerunning,
	.ops = &icx_uncore_imc_freerunning_ops,
	.event_descs = icx_uncore_imc_freerunning_events,
	.format_group = &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};

void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}

/* end of ICX uncore support */