1 // SPDX-License-Identifier: GPL-2.0
2 /* SandyBridge-EP/IvyTown uncore support */
5 /* SNB-EP pci bus to socket mapping */
6 #define SNBEP_CPUNODEID 0x40
7 #define SNBEP_GIDNIDMAP 0x54
9 /* SNB-EP Box level control */
10 #define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
11 #define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
12 #define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
13 #define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
14 #define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
15 SNBEP_PMON_BOX_CTL_RST_CTRS | \
16 SNBEP_PMON_BOX_CTL_FRZ_EN)
17 /* SNB-EP event control */
18 #define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
19 #define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
20 #define SNBEP_PMON_CTL_RST (1 << 17)
21 #define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
22 #define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21)
23 #define SNBEP_PMON_CTL_EN (1 << 22)
24 #define SNBEP_PMON_CTL_INVERT (1 << 23)
25 #define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
26 #define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
27 SNBEP_PMON_CTL_UMASK_MASK | \
28 SNBEP_PMON_CTL_EDGE_DET | \
29 SNBEP_PMON_CTL_INVERT | \
30 SNBEP_PMON_CTL_TRESH_MASK)
32 /* SNB-EP Ubox event control */
33 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
34 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
35 (SNBEP_PMON_CTL_EV_SEL_MASK | \
36 SNBEP_PMON_CTL_UMASK_MASK | \
37 SNBEP_PMON_CTL_EDGE_DET | \
38 SNBEP_PMON_CTL_INVERT | \
39 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
41 #define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
42 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
43 SNBEP_CBO_PMON_CTL_TID_EN)
45 /* SNB-EP PCU event control */
46 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
47 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
48 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
49 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
50 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
51 (SNBEP_PMON_CTL_EV_SEL_MASK | \
52 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
53 SNBEP_PMON_CTL_EDGE_DET | \
54 SNBEP_PMON_CTL_INVERT | \
55 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
56 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
57 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
59 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
60 (SNBEP_PMON_RAW_EVENT_MASK | \
61 SNBEP_PMON_CTL_EV_SEL_EXT)
63 /* SNB-EP pci control register */
64 #define SNBEP_PCI_PMON_BOX_CTL 0xf4
65 #define SNBEP_PCI_PMON_CTL0 0xd8
66 /* SNB-EP pci counter register */
67 #define SNBEP_PCI_PMON_CTR0 0xa0
69 /* SNB-EP home agent register */
70 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
71 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
72 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
73 /* SNB-EP memory controller register */
74 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
75 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
76 /* SNB-EP QPI register */
77 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
78 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
79 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
80 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c
82 /* SNB-EP Ubox register */
83 #define SNBEP_U_MSR_PMON_CTR0 0xc16
84 #define SNBEP_U_MSR_PMON_CTL0 0xc10
86 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
87 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09
89 /* SNB-EP Cbo register */
90 #define SNBEP_C0_MSR_PMON_CTR0 0xd16
91 #define SNBEP_C0_MSR_PMON_CTL0 0xd10
92 #define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
93 #define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
94 #define SNBEP_CBO_MSR_OFFSET 0x20
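/*
 * Per-box MSR spacing: Cbox <n> uses the Cbox 0 addresses above plus
 * n * SNBEP_CBO_MSR_OFFSET, e.g. Cbox 2's box control MSR is
 * 0xd04 + 2 * 0x20 = 0xd44.
 */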
96 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f
97 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00
98 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000
99 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}
108 /* SNB-EP PCU register */
109 #define SNBEP_PCU_MSR_PMON_CTR0 0xc36
110 #define SNBEP_PCU_MSR_PMON_CTL0 0xc30
111 #define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
112 #define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
113 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
114 #define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
115 #define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
117 /* IVBEP event control */
118 #define IVBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
119 SNBEP_PMON_BOX_CTL_RST_CTRS)
120 #define IVBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
121 SNBEP_PMON_CTL_UMASK_MASK | \
122 SNBEP_PMON_CTL_EDGE_DET | \
123 SNBEP_PMON_CTL_TRESH_MASK)
125 #define IVBEP_U_MSR_PMON_GLOBAL_CTL 0xc00
126 #define IVBEP_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
127 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29)
129 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
130 (SNBEP_PMON_CTL_EV_SEL_MASK | \
131 SNBEP_PMON_CTL_UMASK_MASK | \
132 SNBEP_PMON_CTL_EDGE_DET | \
133 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
135 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK (IVBEP_PMON_RAW_EVENT_MASK | \
136 SNBEP_CBO_PMON_CTL_TID_EN)
138 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0)
139 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5)
140 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
141 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
142 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
143 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
144 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
145 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
147 /* IVBEP home agent */
148 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16)
149 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK \
150 (IVBEP_PMON_RAW_EVENT_MASK | \
151 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
153 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
154 (SNBEP_PMON_CTL_EV_SEL_MASK | \
155 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
156 SNBEP_PMON_CTL_EDGE_DET | \
157 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
158 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
159 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
161 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
162 (IVBEP_PMON_RAW_EVENT_MASK | \
163 SNBEP_PMON_CTL_EV_SEL_EXT)
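/*
 * Extract the i-th n-bit wide field from x; e.g. the shared filter-register
 * code below reads back per-field reference counts that are packed six bits
 * apart in an atomic_t with __BITS_VALUE(atomic_read(&er->ref), i, 6).
 */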
165 #define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
166 ((1ULL << (n)) - 1)))
168 /* Haswell-EP Ubox */
169 #define HSWEP_U_MSR_PMON_CTR0 0x709
170 #define HSWEP_U_MSR_PMON_CTL0 0x705
171 #define HSWEP_U_MSR_PMON_FILTER 0x707
173 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL 0x703
174 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR 0x704
176 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID (0x1 << 0)
177 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID (0x1fULL << 1)
178 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
179 (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
180 HSWEP_U_MSR_PMON_BOX_FILTER_CID)
183 #define HSWEP_C0_MSR_PMON_CTR0 0xe08
184 #define HSWEP_C0_MSR_PMON_CTL0 0xe01
185 #define HSWEP_C0_MSR_PMON_BOX_CTL 0xe00
186 #define HSWEP_C0_MSR_PMON_BOX_FILTER0 0xe05
187 #define HSWEP_CBO_MSR_OFFSET 0x10
190 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID (0x3fULL << 0)
191 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 6)
192 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x7fULL << 17)
193 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
194 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
195 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
196 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
197 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
200 /* Haswell-EP Sbox */
201 #define HSWEP_S0_MSR_PMON_CTR0 0x726
202 #define HSWEP_S0_MSR_PMON_CTL0 0x721
203 #define HSWEP_S0_MSR_PMON_BOX_CTL 0x720
204 #define HSWEP_SBOX_MSR_OFFSET 0xa
205 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
206 SNBEP_CBO_PMON_CTL_TID_EN)
209 #define HSWEP_PCU_MSR_PMON_CTR0 0x717
210 #define HSWEP_PCU_MSR_PMON_CTL0 0x711
211 #define HSWEP_PCU_MSR_PMON_BOX_CTL 0x710
212 #define HSWEP_PCU_MSR_PMON_BOX_FILTER 0x715
215 #define KNL_U_MSR_PMON_RAW_EVENT_MASK \
216 (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
217 SNBEP_CBO_PMON_CTL_TID_EN)
219 #define KNL_CHA_MSR_OFFSET 0xc
220 #define KNL_CHA_MSR_PMON_CTL_QOR (1 << 16)
221 #define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
222 (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
223 KNL_CHA_MSR_PMON_CTL_QOR)
224 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID 0x1ff
225 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE (7 << 18)
226 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP (0xfffffe2aULL << 32)
227 #define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
228 #define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE (0x1ULL << 33)
229 #define KNL_CHA_MSR_PMON_BOX_FILTER_NNC (0x1ULL << 37)
231 /* KNL EDC/MC UCLK */
232 #define KNL_UCLK_MSR_PMON_CTR0_LOW 0x400
233 #define KNL_UCLK_MSR_PMON_CTL0 0x420
234 #define KNL_UCLK_MSR_PMON_BOX_CTL 0x430
235 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW 0x44c
236 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL 0x454
237 #define KNL_PMON_FIXED_CTL_EN 0x1
240 #define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW 0xa00
241 #define KNL_EDC0_ECLK_MSR_PMON_CTL0 0xa20
242 #define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL 0xa30
243 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW 0xa3c
244 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL 0xa44
247 #define KNL_MC0_CH0_MSR_PMON_CTR0_LOW 0xb00
248 #define KNL_MC0_CH0_MSR_PMON_CTL0 0xb20
249 #define KNL_MC0_CH0_MSR_PMON_BOX_CTL 0xb30
250 #define KNL_MC0_CH0_MSR_PMON_FIXED_LOW 0xb3c
251 #define KNL_MC0_CH0_MSR_PMON_FIXED_CTL 0xb44
254 #define KNL_IRP_PCI_PMON_BOX_CTL 0xf0
255 #define KNL_IRP_PCI_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
256 KNL_CHA_MSR_PMON_CTL_QOR)
258 #define KNL_PCU_PMON_CTL_EV_SEL_MASK 0x0000007f
259 #define KNL_PCU_PMON_CTL_USE_OCC_CTR (1 << 7)
260 #define KNL_PCU_MSR_PMON_CTL_TRESH_MASK 0x3f000000
261 #define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
262 (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
263 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
264 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
265 SNBEP_PMON_CTL_EDGE_DET | \
266 SNBEP_CBO_PMON_CTL_TID_EN | \
267 SNBEP_PMON_CTL_INVERT | \
268 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
269 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
270 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
272 /* SKX pci bus to socket mapping */
273 #define SKX_CPUNODEID 0xc0
274 #define SKX_GIDNIDMAP 0xd4
/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSRs
 * that BIOS programmed. The MSR has package scope.
 * |  Bit  | Default | Description
 * | [63]  |   00h   | VALID - When set, indicates the CPU bus
 *                     numbers have been initialized. (RO)
 * |[62:48]|   ---   | Reserved
 * |[47:40]|   00h   | BUS_NUM_5 - Return the bus number BIOS assigned
 *                     CPUBUSNO(5). (RO)
 * |[39:32]|   00h   | BUS_NUM_4 - Return the bus number BIOS assigned
 *                     CPUBUSNO(4). (RO)
 * |[31:24]|   00h   | BUS_NUM_3 - Return the bus number BIOS assigned
 *                     CPUBUSNO(3). (RO)
 * |[23:16]|   00h   | BUS_NUM_2 - Return the bus number BIOS assigned
 *                     CPUBUSNO(2). (RO)
 * |[15:8] |   00h   | BUS_NUM_1 - Return the bus number BIOS assigned
 *                     CPUBUSNO(1). (RO)
 * | [7:0] |   00h   | BUS_NUM_0 - Return the bus number BIOS assigned
 *                     CPUBUSNO(0). (RO)
 */
296 #define SKX_MSR_CPU_BUS_NUMBER 0x300
297 #define SKX_MSR_CPU_BUS_VALID_BIT (1ULL << 63)
298 #define BUS_NUM_STRIDE 8
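/*
 * Illustrative sketch only (the helper name is made up for this example):
 * each BUS_NUM_<n> field is BUS_NUM_STRIDE bits wide, so once the VALID bit
 * has been checked, bus number <n> can be pulled out of the MSR value like
 * this.
 */
static inline u8 skx_cpu_bus_num(u64 msr_val, int idx)
{
	/* BUS_NUM_0..BUS_NUM_5 sit at bits [idx * 8 + 7 : idx * 8] */
	return (msr_val >> (idx * BUS_NUM_STRIDE)) & 0xff;
}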
301 #define SKX_CHA_MSR_PMON_BOX_FILTER_TID (0x1ffULL << 0)
302 #define SKX_CHA_MSR_PMON_BOX_FILTER_LINK (0xfULL << 9)
303 #define SKX_CHA_MSR_PMON_BOX_FILTER_STATE (0x3ffULL << 17)
304 #define SKX_CHA_MSR_PMON_BOX_FILTER_REM (0x1ULL << 32)
305 #define SKX_CHA_MSR_PMON_BOX_FILTER_LOC (0x1ULL << 33)
306 #define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC (0x1ULL << 35)
307 #define SKX_CHA_MSR_PMON_BOX_FILTER_NM (0x1ULL << 36)
308 #define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM (0x1ULL << 37)
309 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0 (0x3ffULL << 41)
310 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1 (0x3ffULL << 51)
311 #define SKX_CHA_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
312 #define SKX_CHA_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
313 #define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
316 #define SKX_IIO0_MSR_PMON_CTL0 0xa48
317 #define SKX_IIO0_MSR_PMON_CTR0 0xa41
318 #define SKX_IIO0_MSR_PMON_BOX_CTL 0xa40
319 #define SKX_IIO_MSR_OFFSET 0x20
321 #define SKX_PMON_CTL_TRESH_MASK (0xff << 24)
322 #define SKX_PMON_CTL_TRESH_MASK_EXT (0xf)
323 #define SKX_PMON_CTL_CH_MASK (0xff << 4)
324 #define SKX_PMON_CTL_FC_MASK (0x7 << 12)
325 #define SKX_IIO_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
326 SNBEP_PMON_CTL_UMASK_MASK | \
327 SNBEP_PMON_CTL_EDGE_DET | \
328 SNBEP_PMON_CTL_INVERT | \
329 SKX_PMON_CTL_TRESH_MASK)
330 #define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
331 SKX_PMON_CTL_CH_MASK | \
332 SKX_PMON_CTL_FC_MASK)
335 #define SKX_IRP0_MSR_PMON_CTL0 0xa5b
336 #define SKX_IRP0_MSR_PMON_CTR0 0xa59
337 #define SKX_IRP0_MSR_PMON_BOX_CTL 0xa58
338 #define SKX_IRP_MSR_OFFSET 0x20
341 #define SKX_UPI_PCI_PMON_CTL0 0x350
342 #define SKX_UPI_PCI_PMON_CTR0 0x318
343 #define SKX_UPI_PCI_PMON_BOX_CTL 0x378
344 #define SKX_UPI_CTL_UMASK_EXT 0xffefff
347 #define SKX_M2M_PCI_PMON_CTL0 0x228
348 #define SKX_M2M_PCI_PMON_CTR0 0x200
349 #define SKX_M2M_PCI_PMON_BOX_CTL 0x258
351 /* Memory Map registers device ID */
352 #define SNR_ICX_MESH2IIO_MMAP_DID 0x9a2
353 #define SNR_ICX_SAD_CONTROL_CFG 0x3f4
/* Getting I/O stack id in SAD_CONTROL_CFG notation */
356 #define SAD_CONTROL_STACK_ID(data) (((data) >> 4) & 0x7)
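/*
 * Example: a SAD_CONTROL_CFG value of 0x35 has bits [6:4] equal to 0x3,
 * so SAD_CONTROL_STACK_ID(0x35) == 3, i.e. the device belongs to I/O stack 3.
 */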
359 #define SNR_U_MSR_PMON_CTR0 0x1f98
360 #define SNR_U_MSR_PMON_CTL0 0x1f91
361 #define SNR_U_MSR_PMON_UCLK_FIXED_CTL 0x1f93
362 #define SNR_U_MSR_PMON_UCLK_FIXED_CTR 0x1f94
365 #define SNR_CHA_RAW_EVENT_MASK_EXT 0x3ffffff
366 #define SNR_CHA_MSR_PMON_CTL0 0x1c01
367 #define SNR_CHA_MSR_PMON_CTR0 0x1c08
368 #define SNR_CHA_MSR_PMON_BOX_CTL 0x1c00
369 #define SNR_C0_MSR_PMON_BOX_FILTER0 0x1c05
373 #define SNR_IIO_MSR_PMON_CTL0 0x1e08
374 #define SNR_IIO_MSR_PMON_CTR0 0x1e01
375 #define SNR_IIO_MSR_PMON_BOX_CTL 0x1e00
376 #define SNR_IIO_MSR_OFFSET 0x10
377 #define SNR_IIO_PMON_RAW_EVENT_MASK_EXT 0x7ffff
380 #define SNR_IRP0_MSR_PMON_CTL0 0x1ea8
381 #define SNR_IRP0_MSR_PMON_CTR0 0x1ea1
382 #define SNR_IRP0_MSR_PMON_BOX_CTL 0x1ea0
383 #define SNR_IRP_MSR_OFFSET 0x10
386 #define SNR_M2PCIE_MSR_PMON_CTL0 0x1e58
387 #define SNR_M2PCIE_MSR_PMON_CTR0 0x1e51
388 #define SNR_M2PCIE_MSR_PMON_BOX_CTL 0x1e50
389 #define SNR_M2PCIE_MSR_OFFSET 0x10
392 #define SNR_PCU_MSR_PMON_CTL0 0x1ef1
393 #define SNR_PCU_MSR_PMON_CTR0 0x1ef8
394 #define SNR_PCU_MSR_PMON_BOX_CTL 0x1ef0
395 #define SNR_PCU_MSR_PMON_BOX_FILTER 0x1efc
398 #define SNR_M2M_PCI_PMON_CTL0 0x468
399 #define SNR_M2M_PCI_PMON_CTR0 0x440
400 #define SNR_M2M_PCI_PMON_BOX_CTL 0x438
401 #define SNR_M2M_PCI_PMON_UMASK_EXT 0xff
404 #define SNR_PCIE3_PCI_PMON_CTL0 0x508
405 #define SNR_PCIE3_PCI_PMON_CTR0 0x4e8
406 #define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e0
409 #define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54
410 #define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38
411 #define SNR_IMC_MMIO_PMON_CTL0 0x40
412 #define SNR_IMC_MMIO_PMON_CTR0 0x8
413 #define SNR_IMC_MMIO_PMON_BOX_CTL 0x22800
414 #define SNR_IMC_MMIO_OFFSET 0x4000
415 #define SNR_IMC_MMIO_SIZE 0x4000
416 #define SNR_IMC_MMIO_BASE_OFFSET 0xd0
417 #define SNR_IMC_MMIO_BASE_MASK 0x1FFFFFFF
418 #define SNR_IMC_MMIO_MEM0_OFFSET 0xd8
419 #define SNR_IMC_MMIO_MEM0_MASK 0x7FF
422 #define ICX_C34_MSR_PMON_CTR0 0xb68
423 #define ICX_C34_MSR_PMON_CTL0 0xb61
424 #define ICX_C34_MSR_PMON_BOX_CTL 0xb60
425 #define ICX_C34_MSR_PMON_BOX_FILTER0 0xb65
428 #define ICX_IIO_MSR_PMON_CTL0 0xa58
429 #define ICX_IIO_MSR_PMON_CTR0 0xa51
430 #define ICX_IIO_MSR_PMON_BOX_CTL 0xa50
433 #define ICX_IRP0_MSR_PMON_CTL0 0xa4d
434 #define ICX_IRP0_MSR_PMON_CTR0 0xa4b
435 #define ICX_IRP0_MSR_PMON_BOX_CTL 0xa4a
438 #define ICX_M2PCIE_MSR_PMON_CTL0 0xa46
439 #define ICX_M2PCIE_MSR_PMON_CTR0 0xa41
440 #define ICX_M2PCIE_MSR_PMON_BOX_CTL 0xa40
443 #define ICX_UPI_PCI_PMON_CTL0 0x350
444 #define ICX_UPI_PCI_PMON_CTR0 0x320
445 #define ICX_UPI_PCI_PMON_BOX_CTL 0x318
446 #define ICX_UPI_CTL_UMASK_EXT 0xffffff
449 #define ICX_M3UPI_PCI_PMON_CTL0 0xd8
450 #define ICX_M3UPI_PCI_PMON_CTR0 0xa8
451 #define ICX_M3UPI_PCI_PMON_BOX_CTL 0xa0
454 #define ICX_NUMBER_IMC_CHN 2
455 #define ICX_IMC_MEM_STRIDE 0x4
457 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
458 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
459 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
460 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
461 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
462 DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
463 DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
464 DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
465 DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
466 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
467 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
468 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
469 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
470 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
471 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
472 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
473 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
474 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
475 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
476 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
477 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
478 DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
479 DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
480 DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
481 DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
482 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
483 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
484 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
485 DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
486 DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
487 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
488 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
489 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
490 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
491 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
492 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
493 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
494 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
495 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
496 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
497 DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
498 DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
499 DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
500 DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
501 DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
502 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
503 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
504 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
505 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
506 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
507 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
508 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
509 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
510 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
511 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
512 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
513 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
514 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
515 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
516 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
517 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
518 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
519 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
520 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
521 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
522 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
523 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
524 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
525 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
526 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
527 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
528 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
529 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
530 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
531 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
532 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
533 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
534 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
536 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
538 struct pci_dev *pdev = box->pci_dev;
539 int box_ctl = uncore_pci_box_ctl(box);
542 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
543 config |= SNBEP_PMON_BOX_CTL_FRZ;
544 pci_write_config_dword(pdev, box_ctl, config);
548 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
550 struct pci_dev *pdev = box->pci_dev;
551 int box_ctl = uncore_pci_box_ctl(box);
554 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
555 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
556 pci_write_config_dword(pdev, box_ctl, config);
560 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
562 struct pci_dev *pdev = box->pci_dev;
563 struct hw_perf_event *hwc = &event->hw;
565 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
568 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
570 struct pci_dev *pdev = box->pci_dev;
571 struct hw_perf_event *hwc = &event->hw;
573 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
576 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
578 struct pci_dev *pdev = box->pci_dev;
579 struct hw_perf_event *hwc = &event->hw;
582 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
583 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
588 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
590 struct pci_dev *pdev = box->pci_dev;
591 int box_ctl = uncore_pci_box_ctl(box);
593 pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
596 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
601 msr = uncore_msr_box_ctl(box);
604 config |= SNBEP_PMON_BOX_CTL_FRZ;
609 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
614 msr = uncore_msr_box_ctl(box);
617 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
622 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
624 struct hw_perf_event *hwc = &event->hw;
625 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
627 if (reg1->idx != EXTRA_REG_NONE)
628 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
630 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
633 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
634 struct perf_event *event)
636 struct hw_perf_event *hwc = &event->hw;
638 wrmsrl(hwc->config_base, hwc->config);
641 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
643 unsigned msr = uncore_msr_box_ctl(box);
646 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
649 static struct attribute *snbep_uncore_formats_attr[] = {
650 &format_attr_event.attr,
651 &format_attr_umask.attr,
652 &format_attr_edge.attr,
653 &format_attr_inv.attr,
654 &format_attr_thresh8.attr,
658 static struct attribute *snbep_uncore_ubox_formats_attr[] = {
659 &format_attr_event.attr,
660 &format_attr_umask.attr,
661 &format_attr_edge.attr,
662 &format_attr_inv.attr,
663 &format_attr_thresh5.attr,
667 static struct attribute *snbep_uncore_cbox_formats_attr[] = {
668 &format_attr_event.attr,
669 &format_attr_umask.attr,
670 &format_attr_edge.attr,
671 &format_attr_tid_en.attr,
672 &format_attr_inv.attr,
673 &format_attr_thresh8.attr,
674 &format_attr_filter_tid.attr,
675 &format_attr_filter_nid.attr,
676 &format_attr_filter_state.attr,
677 &format_attr_filter_opc.attr,
681 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
682 &format_attr_event.attr,
683 &format_attr_occ_sel.attr,
684 &format_attr_edge.attr,
685 &format_attr_inv.attr,
686 &format_attr_thresh5.attr,
687 &format_attr_occ_invert.attr,
688 &format_attr_occ_edge.attr,
689 &format_attr_filter_band0.attr,
690 &format_attr_filter_band1.attr,
691 &format_attr_filter_band2.attr,
692 &format_attr_filter_band3.attr,
696 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
697 &format_attr_event_ext.attr,
698 &format_attr_umask.attr,
699 &format_attr_edge.attr,
700 &format_attr_inv.attr,
701 &format_attr_thresh8.attr,
702 &format_attr_match_rds.attr,
703 &format_attr_match_rnid30.attr,
704 &format_attr_match_rnid4.attr,
705 &format_attr_match_dnid.attr,
706 &format_attr_match_mc.attr,
707 &format_attr_match_opc.attr,
708 &format_attr_match_vnw.attr,
709 &format_attr_match0.attr,
710 &format_attr_match1.attr,
711 &format_attr_mask_rds.attr,
712 &format_attr_mask_rnid30.attr,
713 &format_attr_mask_rnid4.attr,
714 &format_attr_mask_dnid.attr,
715 &format_attr_mask_mc.attr,
716 &format_attr_mask_opc.attr,
717 &format_attr_mask_vnw.attr,
718 &format_attr_mask0.attr,
719 &format_attr_mask1.attr,
723 static struct uncore_event_desc snbep_uncore_imc_events[] = {
724 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
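	/*
	 * Each CAS command moves one 64-byte cache line, so the .scale
	 * values below convert the raw CAS counts to MiB:
	 * 64 / 2^20 = 6.103515625e-5.
	 */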
725 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
726 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
727 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
728 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
729 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
730 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
731 { /* end: all zeroes */ },
734 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
735 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
736 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
737 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
738 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
739 { /* end: all zeroes */ },
742 static const struct attribute_group snbep_uncore_format_group = {
744 .attrs = snbep_uncore_formats_attr,
747 static const struct attribute_group snbep_uncore_ubox_format_group = {
749 .attrs = snbep_uncore_ubox_formats_attr,
752 static const struct attribute_group snbep_uncore_cbox_format_group = {
754 .attrs = snbep_uncore_cbox_formats_attr,
757 static const struct attribute_group snbep_uncore_pcu_format_group = {
759 .attrs = snbep_uncore_pcu_formats_attr,
762 static const struct attribute_group snbep_uncore_qpi_format_group = {
764 .attrs = snbep_uncore_qpi_formats_attr,
767 #define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
768 .disable_box = snbep_uncore_msr_disable_box, \
769 .enable_box = snbep_uncore_msr_enable_box, \
770 .disable_event = snbep_uncore_msr_disable_event, \
771 .enable_event = snbep_uncore_msr_enable_event, \
772 .read_counter = uncore_msr_read_counter
774 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
775 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), \
776 .init_box = snbep_uncore_msr_init_box \
778 static struct intel_uncore_ops snbep_uncore_msr_ops = {
779 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
782 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
783 .init_box = snbep_uncore_pci_init_box, \
784 .disable_box = snbep_uncore_pci_disable_box, \
785 .enable_box = snbep_uncore_pci_enable_box, \
786 .disable_event = snbep_uncore_pci_disable_event, \
787 .read_counter = snbep_uncore_pci_read_counter
789 static struct intel_uncore_ops snbep_uncore_pci_ops = {
790 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
794 static struct event_constraint snbep_uncore_cbox_constraints[] = {
795 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
796 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
797 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
798 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
799 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
800 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
801 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
802 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
803 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
804 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
805 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
806 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
807 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
808 UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
809 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
810 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
811 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
812 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
813 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
814 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
815 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
816 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
817 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
818 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
819 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
820 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
824 static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
825 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
826 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
827 UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
828 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
829 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
830 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
831 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
832 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
833 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
834 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
838 static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
839 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
840 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
841 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
842 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
843 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
844 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
845 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
846 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
847 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
848 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
849 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
850 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
851 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
852 UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
853 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
854 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
855 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
856 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
857 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
858 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
859 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
860 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
861 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
862 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
863 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
864 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
865 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
866 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
870 static struct intel_uncore_type snbep_uncore_ubox = {
875 .fixed_ctr_bits = 48,
876 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
877 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
878 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
879 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
880 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
881 .ops = &snbep_uncore_msr_ops,
882 .format_group = &snbep_uncore_ubox_format_group,
885 static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
886 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
887 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
888 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
889 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
890 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
891 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
892 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
893 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
894 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
895 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
896 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
897 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
898 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
899 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
900 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
901 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
902 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
903 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
904 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
905 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
906 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
907 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
908 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
909 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
910 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
914 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
916 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
917 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
920 if (uncore_box_is_fake(box))
923 for (i = 0; i < 5; i++) {
924 if (reg1->alloc & (0x1 << i))
925 atomic_sub(1 << (i * 6), &er->ref);
930 static struct event_constraint *
931 __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
932 u64 (*cbox_filter_mask)(int fields))
934 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
935 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
940 if (reg1->idx == EXTRA_REG_NONE)
943 raw_spin_lock_irqsave(&er->lock, flags);
944 for (i = 0; i < 5; i++) {
945 if (!(reg1->idx & (0x1 << i)))
947 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
950 mask = cbox_filter_mask(0x1 << i);
951 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
952 !((reg1->config ^ er->config) & mask)) {
953 atomic_add(1 << (i * 6), &er->ref);
955 er->config |= reg1->config & mask;
961 raw_spin_unlock_irqrestore(&er->lock, flags);
965 if (!uncore_box_is_fake(box))
966 reg1->alloc |= alloc;
970 for (; i >= 0; i--) {
971 if (alloc & (0x1 << i))
972 atomic_sub(1 << (i * 6), &er->ref);
974 return &uncore_constraint_empty;
977 static u64 snbep_cbox_filter_mask(int fields)
982 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
984 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
986 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
988 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
993 static struct event_constraint *
994 snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
996 return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
999 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1001 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1002 struct extra_reg *er;
1005 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
1006 if (er->event != (event->hw.config & er->config_mask))
1012 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1013 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1014 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
1020 static struct intel_uncore_ops snbep_uncore_cbox_ops = {
1021 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1022 .hw_config = snbep_cbox_hw_config,
1023 .get_constraint = snbep_cbox_get_constraint,
1024 .put_constraint = snbep_cbox_put_constraint,
1027 static struct intel_uncore_type snbep_uncore_cbox = {
1031 .perf_ctr_bits = 44,
1032 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1033 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1034 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1035 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1036 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1037 .num_shared_regs = 1,
1038 .constraints = snbep_uncore_cbox_constraints,
1039 .ops = &snbep_uncore_cbox_ops,
1040 .format_group = &snbep_uncore_cbox_format_group,
1043 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
1045 struct hw_perf_event *hwc = &event->hw;
1046 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1047 u64 config = reg1->config;
1049 if (new_idx > reg1->idx)
1050 config <<= 8 * (new_idx - reg1->idx);
1052 config >>= 8 * (reg1->idx - new_idx);
1055 hwc->config += new_idx - reg1->idx;
1056 reg1->config = config;
1057 reg1->idx = new_idx;
1062 static struct event_constraint *
1063 snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1065 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1066 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1067 unsigned long flags;
1068 int idx = reg1->idx;
1069 u64 mask, config1 = reg1->config;
1072 if (reg1->idx == EXTRA_REG_NONE ||
1073 (!uncore_box_is_fake(box) && reg1->alloc))
1076 mask = 0xffULL << (idx * 8);
1077 raw_spin_lock_irqsave(&er->lock, flags);
1078 if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
1079 !((config1 ^ er->config) & mask)) {
1080 atomic_add(1 << (idx * 8), &er->ref);
1081 er->config &= ~mask;
1082 er->config |= config1 & mask;
1085 raw_spin_unlock_irqrestore(&er->lock, flags);
1088 idx = (idx + 1) % 4;
1089 if (idx != reg1->idx) {
1090 config1 = snbep_pcu_alter_er(event, idx, false);
1093 return &uncore_constraint_empty;
1096 if (!uncore_box_is_fake(box)) {
1097 if (idx != reg1->idx)
1098 snbep_pcu_alter_er(event, idx, true);
1104 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1106 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1107 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1109 if (uncore_box_is_fake(box) || !reg1->alloc)
1112 atomic_sub(1 << (reg1->idx * 8), &er->ref);
1116 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1118 struct hw_perf_event *hwc = &event->hw;
1119 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1120 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
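	/*
	 * Events 0xb-0xe are the PCU band events; each takes an 8-bit
	 * threshold from the corresponding filter_band<n> byte of the
	 * PCU filter register (config1), selected below.
	 */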
1122 if (ev_sel >= 0xb && ev_sel <= 0xe) {
1123 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1124 reg1->idx = ev_sel - 0xb;
1125 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
1130 static struct intel_uncore_ops snbep_uncore_pcu_ops = {
1131 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1132 .hw_config = snbep_pcu_hw_config,
1133 .get_constraint = snbep_pcu_get_constraint,
1134 .put_constraint = snbep_pcu_put_constraint,
1137 static struct intel_uncore_type snbep_uncore_pcu = {
1141 .perf_ctr_bits = 48,
1142 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1143 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1144 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1145 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1146 .num_shared_regs = 1,
1147 .ops = &snbep_uncore_pcu_ops,
1148 .format_group = &snbep_uncore_pcu_format_group,
1151 static struct intel_uncore_type *snbep_msr_uncores[] = {
1158 void snbep_uncore_cpu_init(void)
1160 if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1161 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1162 uncore_msr_uncores = snbep_msr_uncores;
1166 SNBEP_PCI_QPI_PORT0_FILTER,
1167 SNBEP_PCI_QPI_PORT1_FILTER,
1168 BDX_PCI_QPI_PORT2_FILTER,
1171 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1173 struct hw_perf_event *hwc = &event->hw;
1174 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1175 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1177 if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1179 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1180 reg1->config = event->attr.config1;
1181 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1182 reg2->config = event->attr.config2;
1187 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1189 struct pci_dev *pdev = box->pci_dev;
1190 struct hw_perf_event *hwc = &event->hw;
1191 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1192 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1194 if (reg1->idx != EXTRA_REG_NONE) {
1195 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
1196 int die = box->dieid;
1197 struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];
1200 pci_write_config_dword(filter_pdev, reg1->reg,
1202 pci_write_config_dword(filter_pdev, reg1->reg + 4,
1203 (u32)(reg1->config >> 32));
1204 pci_write_config_dword(filter_pdev, reg2->reg,
1206 pci_write_config_dword(filter_pdev, reg2->reg + 4,
1207 (u32)(reg2->config >> 32));
1211 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1214 static struct intel_uncore_ops snbep_uncore_qpi_ops = {
1215 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
1216 .enable_event = snbep_qpi_enable_event,
1217 .hw_config = snbep_qpi_hw_config,
1218 .get_constraint = uncore_get_constraint,
1219 .put_constraint = uncore_put_constraint,
1222 #define SNBEP_UNCORE_PCI_COMMON_INIT() \
1223 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
1224 .event_ctl = SNBEP_PCI_PMON_CTL0, \
1225 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
1226 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
1227 .ops = &snbep_uncore_pci_ops, \
1228 .format_group = &snbep_uncore_format_group
1230 static struct intel_uncore_type snbep_uncore_ha = {
1234 .perf_ctr_bits = 48,
1235 SNBEP_UNCORE_PCI_COMMON_INIT(),
1238 static struct intel_uncore_type snbep_uncore_imc = {
1242 .perf_ctr_bits = 48,
1243 .fixed_ctr_bits = 48,
1244 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1245 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1246 .event_descs = snbep_uncore_imc_events,
1247 SNBEP_UNCORE_PCI_COMMON_INIT(),
1250 static struct intel_uncore_type snbep_uncore_qpi = {
1254 .perf_ctr_bits = 48,
1255 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1256 .event_ctl = SNBEP_PCI_PMON_CTL0,
1257 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1258 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1259 .num_shared_regs = 1,
1260 .ops = &snbep_uncore_qpi_ops,
1261 .event_descs = snbep_uncore_qpi_events,
1262 .format_group = &snbep_uncore_qpi_format_group,
1266 static struct intel_uncore_type snbep_uncore_r2pcie = {
1270 .perf_ctr_bits = 44,
1271 .constraints = snbep_uncore_r2pcie_constraints,
1272 SNBEP_UNCORE_PCI_COMMON_INIT(),
1275 static struct intel_uncore_type snbep_uncore_r3qpi = {
1279 .perf_ctr_bits = 44,
1280 .constraints = snbep_uncore_r3qpi_constraints,
1281 SNBEP_UNCORE_PCI_COMMON_INIT(),
1285 SNBEP_PCI_UNCORE_HA,
1286 SNBEP_PCI_UNCORE_IMC,
1287 SNBEP_PCI_UNCORE_QPI,
1288 SNBEP_PCI_UNCORE_R2PCIE,
1289 SNBEP_PCI_UNCORE_R3QPI,
1292 static struct intel_uncore_type *snbep_pci_uncores[] = {
1293 [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
1294 [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
1295 [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
1296 [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
1297 [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
1301 static const struct pci_device_id snbep_uncore_pci_ids[] = {
1303 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
1304 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
1306 { /* MC Channel 0 */
1307 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
1308 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
1310 { /* MC Channel 1 */
1311 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
1312 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
1314 { /* MC Channel 2 */
1315 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
1316 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
1318 { /* MC Channel 3 */
1319 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
1320 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
1323 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
1324 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
1327 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
1328 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
1331 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
1332 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
1334 { /* R3QPI Link 0 */
1335 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
1336 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
1338 { /* R3QPI Link 1 */
1339 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
1340 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
1342 { /* QPI Port 0 filter */
1343 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
1344 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1345 SNBEP_PCI_QPI_PORT0_FILTER),
{ /* QPI Port 1 filter */
1348 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
1349 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1350 SNBEP_PCI_QPI_PORT1_FILTER),
1352 { /* end: all zeroes */ }
1355 static struct pci_driver snbep_uncore_pci_driver = {
1356 .name = "snbep_uncore",
1357 .id_table = snbep_uncore_pci_ids,
1360 #define NODE_ID_MASK 0x7
/* build pci bus to socket mapping */
1365 static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
1367 struct pci_dev *ubox_dev = NULL;
1368 int i, bus, nodeid, segment, die_id;
1369 struct pci2phy_map *map;
1374 /* find the UBOX device */
1375 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1378 bus = ubox_dev->bus->number;
		/*
		 * The nodeid and idmap registers only contain enough
		 * information to handle 8 nodes.  On systems with more
		 * than 8 nodes, we need to rely on NUMA information,
		 * filled in from BIOS-supplied information, to determine
		 * the mapping.
		 */
1386 if (nr_node_ids <= 8) {
1387 /* get the Node ID of the local register */
1388 err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
1391 nodeid = config & NODE_ID_MASK;
1392 /* get the Node ID mapping */
1393 err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
1397 segment = pci_domain_nr(ubox_dev->bus);
1398 raw_spin_lock(&pci2phy_map_lock);
1399 map = __find_pci2phy_map(segment);
1401 raw_spin_unlock(&pci2phy_map_lock);
		/*
		 * Every three-bit field in the Node ID mapping register
		 * maps to a particular node.
		 */
1410 for (i = 0; i < 8; i++) {
1411 if (nodeid == ((config >> (3 * i)) & 0x7)) {
				if (topology_max_die_per_package() > 1)
					die_id = i;
				else
					die_id = topology_phys_to_logical_pkg(i);
1418 map->pbus_to_dieid[bus] = die_id;
1422 raw_spin_unlock(&pci2phy_map_lock);
1424 int node = pcibus_to_node(ubox_dev->bus);
1427 segment = pci_domain_nr(ubox_dev->bus);
1428 raw_spin_lock(&pci2phy_map_lock);
1429 map = __find_pci2phy_map(segment);
1431 raw_spin_unlock(&pci2phy_map_lock);
1437 for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
1438 struct cpuinfo_x86 *c = &cpu_data(cpu);
1440 if (c->initialized && cpu_to_node(cpu) == node) {
1441 map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
1445 raw_spin_unlock(&pci2phy_map_lock);
1447 if (WARN_ON_ONCE(die_id == -1)) {
	/*
	 * For a PCI bus with no UBOX device, find the next bus
	 * that has a UBOX device and use its mapping.
	 */
1459 raw_spin_lock(&pci2phy_map_lock);
1460 list_for_each_entry(map, &pci2phy_map_head, list) {
		i = -1;
		if (reverse) {
			for (bus = 255; bus >= 0; bus--) {
				if (map->pbus_to_dieid[bus] != -1)
					i = map->pbus_to_dieid[bus];
				else
					map->pbus_to_dieid[bus] = i;
			}
		} else {
			for (bus = 0; bus <= 255; bus++) {
				if (map->pbus_to_dieid[bus] != -1)
					i = map->pbus_to_dieid[bus];
				else
					map->pbus_to_dieid[bus] = i;
			}
		}
1478 raw_spin_unlock(&pci2phy_map_lock);
1481 pci_dev_put(ubox_dev);
1483 return err ? pcibios_err_to_errno(err) : 0;
1486 int snbep_uncore_pci_init(void)
1488 int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1491 uncore_pci_uncores = snbep_pci_uncores;
1492 uncore_pci_driver = &snbep_uncore_pci_driver;
1495 /* end of Sandy Bridge-EP uncore support */
1497 /* IvyTown uncore support */
1498 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1500 unsigned msr = uncore_msr_box_ctl(box);
1502 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1505 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1507 struct pci_dev *pdev = box->pci_dev;
1509 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1512 #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT() \
1513 .init_box = ivbep_uncore_msr_init_box, \
1514 .disable_box = snbep_uncore_msr_disable_box, \
1515 .enable_box = snbep_uncore_msr_enable_box, \
1516 .disable_event = snbep_uncore_msr_disable_event, \
1517 .enable_event = snbep_uncore_msr_enable_event, \
1518 .read_counter = uncore_msr_read_counter
1520 static struct intel_uncore_ops ivbep_uncore_msr_ops = {
1521 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1524 static struct intel_uncore_ops ivbep_uncore_pci_ops = {
1525 .init_box = ivbep_uncore_pci_init_box,
1526 .disable_box = snbep_uncore_pci_disable_box,
1527 .enable_box = snbep_uncore_pci_enable_box,
1528 .disable_event = snbep_uncore_pci_disable_event,
1529 .enable_event = snbep_uncore_pci_enable_event,
1530 .read_counter = snbep_uncore_pci_read_counter,
1533 #define IVBEP_UNCORE_PCI_COMMON_INIT() \
1534 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
1535 .event_ctl = SNBEP_PCI_PMON_CTL0, \
1536 .event_mask = IVBEP_PMON_RAW_EVENT_MASK, \
1537 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
1538 .ops = &ivbep_uncore_pci_ops, \
1539 .format_group = &ivbep_uncore_format_group
1541 static struct attribute *ivbep_uncore_formats_attr[] = {
1542 &format_attr_event.attr,
1543 &format_attr_umask.attr,
1544 &format_attr_edge.attr,
1545 &format_attr_inv.attr,
1546 &format_attr_thresh8.attr,
1550 static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
1551 &format_attr_event.attr,
1552 &format_attr_umask.attr,
1553 &format_attr_edge.attr,
1554 &format_attr_inv.attr,
1555 &format_attr_thresh5.attr,
1559 static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
1560 &format_attr_event.attr,
1561 &format_attr_umask.attr,
1562 &format_attr_edge.attr,
1563 &format_attr_tid_en.attr,
1564 &format_attr_thresh8.attr,
1565 &format_attr_filter_tid.attr,
1566 &format_attr_filter_link.attr,
1567 &format_attr_filter_state2.attr,
1568 &format_attr_filter_nid2.attr,
1569 &format_attr_filter_opc2.attr,
1570 &format_attr_filter_nc.attr,
1571 &format_attr_filter_c6.attr,
1572 &format_attr_filter_isoc.attr,
1576 static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
1577 &format_attr_event.attr,
1578 &format_attr_occ_sel.attr,
1579 &format_attr_edge.attr,
1580 &format_attr_thresh5.attr,
1581 &format_attr_occ_invert.attr,
1582 &format_attr_occ_edge.attr,
1583 &format_attr_filter_band0.attr,
1584 &format_attr_filter_band1.attr,
1585 &format_attr_filter_band2.attr,
1586 &format_attr_filter_band3.attr,
1590 static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
1591 &format_attr_event_ext.attr,
1592 &format_attr_umask.attr,
1593 &format_attr_edge.attr,
1594 &format_attr_thresh8.attr,
1595 &format_attr_match_rds.attr,
1596 &format_attr_match_rnid30.attr,
1597 &format_attr_match_rnid4.attr,
1598 &format_attr_match_dnid.attr,
1599 &format_attr_match_mc.attr,
1600 &format_attr_match_opc.attr,
1601 &format_attr_match_vnw.attr,
1602 &format_attr_match0.attr,
1603 &format_attr_match1.attr,
1604 &format_attr_mask_rds.attr,
1605 &format_attr_mask_rnid30.attr,
1606 &format_attr_mask_rnid4.attr,
1607 &format_attr_mask_dnid.attr,
1608 &format_attr_mask_mc.attr,
1609 &format_attr_mask_opc.attr,
1610 &format_attr_mask_vnw.attr,
1611 &format_attr_mask0.attr,
1612 &format_attr_mask1.attr,
1616 static const struct attribute_group ivbep_uncore_format_group = {
1618 .attrs = ivbep_uncore_formats_attr,
1621 static const struct attribute_group ivbep_uncore_ubox_format_group = {
1623 .attrs = ivbep_uncore_ubox_formats_attr,
1626 static const struct attribute_group ivbep_uncore_cbox_format_group = {
1628 .attrs = ivbep_uncore_cbox_formats_attr,
1631 static const struct attribute_group ivbep_uncore_pcu_format_group = {
1633 .attrs = ivbep_uncore_pcu_formats_attr,
1636 static const struct attribute_group ivbep_uncore_qpi_format_group = {
1638 .attrs = ivbep_uncore_qpi_formats_attr,
1641 static struct intel_uncore_type ivbep_uncore_ubox = {
1645 .perf_ctr_bits = 44,
1646 .fixed_ctr_bits = 48,
1647 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
1648 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
1649 .event_mask = IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
1650 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1651 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1652 .ops = &ivbep_uncore_msr_ops,
1653 .format_group = &ivbep_uncore_ubox_format_group,
1656 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1657 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1658 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1659 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1660 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1661 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1662 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1663 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1664 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1665 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1666 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1667 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1668 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1669 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1670 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1671 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1672 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1673 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1674 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1675 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1676 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1677 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1678 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1679 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1680 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1681 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1682 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1683 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1684 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1685 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1686 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1687 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1688 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1689 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1690 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1691 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1692 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1693 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1697 static u64 ivbep_cbox_filter_mask(int fields)
1702 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1704 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1706 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1708 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1709 if (fields & 0x10) {
1710 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1711 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1712 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1713 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1719 static struct event_constraint *
1720 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1722 return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1725 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1727 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1728 struct extra_reg *er;
1731 for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1732 if (er->event != (event->hw.config & er->config_mask))
1738 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1739 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1740 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1746 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1748 struct hw_perf_event *hwc = &event->hw;
1749 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1751 if (reg1->idx != EXTRA_REG_NONE) {
1752 u64 filter = uncore_shared_reg_config(box, 0);
1753 wrmsrl(reg1->reg, filter & 0xffffffff);
1754 wrmsrl(reg1->reg + 6, filter >> 32);
1757 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1760 static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1761 .init_box = ivbep_uncore_msr_init_box,
1762 .disable_box = snbep_uncore_msr_disable_box,
1763 .enable_box = snbep_uncore_msr_enable_box,
1764 .disable_event = snbep_uncore_msr_disable_event,
1765 .enable_event = ivbep_cbox_enable_event,
1766 .read_counter = uncore_msr_read_counter,
1767 .hw_config = ivbep_cbox_hw_config,
1768 .get_constraint = ivbep_cbox_get_constraint,
1769 .put_constraint = snbep_cbox_put_constraint,
1772 static struct intel_uncore_type ivbep_uncore_cbox = {
1776 .perf_ctr_bits = 44,
1777 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1778 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1779 .event_mask = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1780 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1781 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1782 .num_shared_regs = 1,
1783 .constraints = snbep_uncore_cbox_constraints,
1784 .ops = &ivbep_uncore_cbox_ops,
1785 .format_group = &ivbep_uncore_cbox_format_group,
1788 static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1789 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1790 .hw_config = snbep_pcu_hw_config,
1791 .get_constraint = snbep_pcu_get_constraint,
1792 .put_constraint = snbep_pcu_put_constraint,
1795 static struct intel_uncore_type ivbep_uncore_pcu = {
1799 .perf_ctr_bits = 48,
1800 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1801 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1802 .event_mask = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1803 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1804 .num_shared_regs = 1,
1805 .ops = &ivbep_uncore_pcu_ops,
1806 .format_group = &ivbep_uncore_pcu_format_group,
1809 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1816 void ivbep_uncore_cpu_init(void)
1818 if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1819 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1820 uncore_msr_uncores = ivbep_msr_uncores;
1823 static struct intel_uncore_type ivbep_uncore_ha = {
1827 .perf_ctr_bits = 48,
1828 IVBEP_UNCORE_PCI_COMMON_INIT(),
1831 static struct intel_uncore_type ivbep_uncore_imc = {
1835 .perf_ctr_bits = 48,
1836 .fixed_ctr_bits = 48,
1837 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1838 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1839 .event_descs = snbep_uncore_imc_events,
1840 IVBEP_UNCORE_PCI_COMMON_INIT(),
1843 /* registers in IRP boxes are not properly aligned */
1844 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1845 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
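/*
 * Because the IRP registers are not at the usual offsets, the per-counter
 * control and counter addresses are table-driven here, and the 64-bit count
 * is assembled from two 32-bit PCI config reads (the low dword plus the dword
 * 4 bytes above it).
 */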
1847 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1849 struct pci_dev *pdev = box->pci_dev;
1850 struct hw_perf_event *hwc = &event->hw;
1852 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1853 hwc->config | SNBEP_PMON_CTL_EN);
1856 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1858 struct pci_dev *pdev = box->pci_dev;
1859 struct hw_perf_event *hwc = &event->hw;
1861 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1864 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1866 struct pci_dev *pdev = box->pci_dev;
1867 struct hw_perf_event *hwc = &event->hw;
1870 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1871 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1876 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1877 .init_box = ivbep_uncore_pci_init_box,
1878 .disable_box = snbep_uncore_pci_disable_box,
1879 .enable_box = snbep_uncore_pci_enable_box,
1880 .disable_event = ivbep_uncore_irp_disable_event,
1881 .enable_event = ivbep_uncore_irp_enable_event,
1882 .read_counter = ivbep_uncore_irp_read_counter,
1885 static struct intel_uncore_type ivbep_uncore_irp = {
1889 .perf_ctr_bits = 48,
1890 .event_mask = IVBEP_PMON_RAW_EVENT_MASK,
1891 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1892 .ops = &ivbep_uncore_irp_ops,
1893 .format_group = &ivbep_uncore_format_group,
1896 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1897 .init_box = ivbep_uncore_pci_init_box,
1898 .disable_box = snbep_uncore_pci_disable_box,
1899 .enable_box = snbep_uncore_pci_enable_box,
1900 .disable_event = snbep_uncore_pci_disable_event,
1901 .enable_event = snbep_qpi_enable_event,
1902 .read_counter = snbep_uncore_pci_read_counter,
1903 .hw_config = snbep_qpi_hw_config,
1904 .get_constraint = uncore_get_constraint,
1905 .put_constraint = uncore_put_constraint,
1908 static struct intel_uncore_type ivbep_uncore_qpi = {
1912 .perf_ctr_bits = 48,
1913 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1914 .event_ctl = SNBEP_PCI_PMON_CTL0,
1915 .event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1916 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1917 .num_shared_regs = 1,
1918 .ops = &ivbep_uncore_qpi_ops,
1919 .format_group = &ivbep_uncore_qpi_format_group,
1922 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1926 .perf_ctr_bits = 44,
1927 .constraints = snbep_uncore_r2pcie_constraints,
1928 IVBEP_UNCORE_PCI_COMMON_INIT(),
1931 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1935 .perf_ctr_bits = 44,
1936 .constraints = snbep_uncore_r3qpi_constraints,
1937 IVBEP_UNCORE_PCI_COMMON_INIT(),
1941 IVBEP_PCI_UNCORE_HA,
1942 IVBEP_PCI_UNCORE_IMC,
1943 IVBEP_PCI_UNCORE_IRP,
1944 IVBEP_PCI_UNCORE_QPI,
1945 IVBEP_PCI_UNCORE_R2PCIE,
1946 IVBEP_PCI_UNCORE_R3QPI,
1949 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1950 [IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha,
1951 [IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc,
1952 [IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp,
1953 [IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi,
1954 [IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie,
1955 [IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi,
1959 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1960 { /* Home Agent 0 */
1961 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1962 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1964 { /* Home Agent 1 */
1965 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1966 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
1968 { /* MC0 Channel 0 */
1969 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1970 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
1972 { /* MC0 Channel 1 */
1973 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1974 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
1976 { /* MC0 Channel 3 */
1977 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1978 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
1980 { /* MC0 Channel 4 */
1981 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1982 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
1984 { /* MC1 Channel 0 */
1985 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1986 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
1988 { /* MC1 Channel 1 */
1989 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1990 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
1992 { /* MC1 Channel 3 */
1993 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1994 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
1996 { /* MC1 Channel 4 */
1997 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1998 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
2001 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
2002 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
2005 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
2006 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
2009 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
2010 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
2013 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
2014 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
2017 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
2018 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
2020 { /* R3QPI0 Link 0 */
2021 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
2022 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
2024 { /* R3QPI0 Link 1 */
2025 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
2026 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
2028 { /* R3QPI1 Link 2 */
2029 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
2030 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
2032 { /* QPI Port 0 filter */
2033 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
2034 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2035 SNBEP_PCI_QPI_PORT0_FILTER),
2037 { /* QPI Port 1 filter */
2038 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
2039 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2040 SNBEP_PCI_QPI_PORT1_FILTER),
2042 { /* end: all zeroes */ }
2045 static struct pci_driver ivbep_uncore_pci_driver = {
2046 .name = "ivbep_uncore",
2047 .id_table = ivbep_uncore_pci_ids,
2050 int ivbep_uncore_pci_init(void)
2052 int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2055 uncore_pci_uncores = ivbep_pci_uncores;
2056 uncore_pci_driver = &ivbep_uncore_pci_driver;
2059 /* end of IvyTown uncore support */
2061 /* KNL uncore support */
2062 static struct attribute *knl_uncore_ubox_formats_attr[] = {
2063 &format_attr_event.attr,
2064 &format_attr_umask.attr,
2065 &format_attr_edge.attr,
2066 &format_attr_tid_en.attr,
2067 &format_attr_inv.attr,
2068 &format_attr_thresh5.attr,
2072 static const struct attribute_group knl_uncore_ubox_format_group = {
2074 .attrs = knl_uncore_ubox_formats_attr,
2077 static struct intel_uncore_type knl_uncore_ubox = {
2081 .perf_ctr_bits = 48,
2082 .fixed_ctr_bits = 48,
2083 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2084 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2085 .event_mask = KNL_U_MSR_PMON_RAW_EVENT_MASK,
2086 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2087 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2088 .ops = &snbep_uncore_msr_ops,
2089 .format_group = &knl_uncore_ubox_format_group,
2092 static struct attribute *knl_uncore_cha_formats_attr[] = {
2093 &format_attr_event.attr,
2094 &format_attr_umask.attr,
2095 &format_attr_qor.attr,
2096 &format_attr_edge.attr,
2097 &format_attr_tid_en.attr,
2098 &format_attr_inv.attr,
2099 &format_attr_thresh8.attr,
2100 &format_attr_filter_tid4.attr,
2101 &format_attr_filter_link3.attr,
2102 &format_attr_filter_state4.attr,
2103 &format_attr_filter_local.attr,
2104 &format_attr_filter_all_op.attr,
2105 &format_attr_filter_nnm.attr,
2106 &format_attr_filter_opc3.attr,
2107 &format_attr_filter_nc.attr,
2108 &format_attr_filter_isoc.attr,
2112 static const struct attribute_group knl_uncore_cha_format_group = {
2114 .attrs = knl_uncore_cha_formats_attr,
2117 static struct event_constraint knl_uncore_cha_constraints[] = {
2118 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2119 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2120 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2121 EVENT_CONSTRAINT_END
2124 static struct extra_reg knl_uncore_cha_extra_regs[] = {
2125 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2126 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2127 SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2128 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2129 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
2133 static u64 knl_cha_filter_mask(int fields)
2138 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2140 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2142 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2146 static struct event_constraint *
2147 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2149 return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
2152 static int knl_cha_hw_config(struct intel_uncore_box *box,
2153 struct perf_event *event)
2155 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2156 struct extra_reg *er;
2159 for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2160 if (er->event != (event->hw.config & er->config_mask))
2166 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2167 KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2168 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2170 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2171 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2172 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2178 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2179 struct perf_event *event);
2181 static struct intel_uncore_ops knl_uncore_cha_ops = {
2182 .init_box = snbep_uncore_msr_init_box,
2183 .disable_box = snbep_uncore_msr_disable_box,
2184 .enable_box = snbep_uncore_msr_enable_box,
2185 .disable_event = snbep_uncore_msr_disable_event,
2186 .enable_event = hswep_cbox_enable_event,
2187 .read_counter = uncore_msr_read_counter,
2188 .hw_config = knl_cha_hw_config,
2189 .get_constraint = knl_cha_get_constraint,
2190 .put_constraint = snbep_cbox_put_constraint,
2193 static struct intel_uncore_type knl_uncore_cha = {
2197 .perf_ctr_bits = 48,
2198 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2199 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2200 .event_mask = KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2201 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2202 .msr_offset = KNL_CHA_MSR_OFFSET,
2203 .num_shared_regs = 1,
2204 .constraints = knl_uncore_cha_constraints,
2205 .ops = &knl_uncore_cha_ops,
2206 .format_group = &knl_uncore_cha_format_group,
2209 static struct attribute *knl_uncore_pcu_formats_attr[] = {
2210 &format_attr_event2.attr,
2211 &format_attr_use_occ_ctr.attr,
2212 &format_attr_occ_sel.attr,
2213 &format_attr_edge.attr,
2214 &format_attr_tid_en.attr,
2215 &format_attr_inv.attr,
2216 &format_attr_thresh6.attr,
2217 &format_attr_occ_invert.attr,
2218 &format_attr_occ_edge_det.attr,
2222 static const struct attribute_group knl_uncore_pcu_format_group = {
2224 .attrs = knl_uncore_pcu_formats_attr,
2227 static struct intel_uncore_type knl_uncore_pcu = {
2231 .perf_ctr_bits = 48,
2232 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2233 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2234 .event_mask = KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2235 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2236 .ops = &snbep_uncore_msr_ops,
2237 .format_group = &knl_uncore_pcu_format_group,
2240 static struct intel_uncore_type *knl_msr_uncores[] = {
2247 void knl_uncore_cpu_init(void)
2249 uncore_msr_uncores = knl_msr_uncores;
2252 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2254 struct pci_dev *pdev = box->pci_dev;
2255 int box_ctl = uncore_pci_box_ctl(box);
2257 pci_write_config_dword(pdev, box_ctl, 0);
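/*
 * Writing 0 presumably clears any freeze/reset bits in the box control
 * register so that all counters in this box start running.
 */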
2260 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2261 struct perf_event *event)
2263 struct pci_dev *pdev = box->pci_dev;
2264 struct hw_perf_event *hwc = &event->hw;
2266 if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2267 == UNCORE_FIXED_EVENT)
2268 pci_write_config_dword(pdev, hwc->config_base,
2269 hwc->config | KNL_PMON_FIXED_CTL_EN);
2271 pci_write_config_dword(pdev, hwc->config_base,
2272 hwc->config | SNBEP_PMON_CTL_EN);
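/*
 * Note: the fixed counter is enabled with its own bit, KNL_PMON_FIXED_CTL_EN,
 * while ordinary events use the generic SNBEP_PMON_CTL_EN bit as elsewhere in
 * this file.
 */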
2275 static struct intel_uncore_ops knl_uncore_imc_ops = {
2276 .init_box = snbep_uncore_pci_init_box,
2277 .disable_box = snbep_uncore_pci_disable_box,
2278 .enable_box = knl_uncore_imc_enable_box,
2279 .read_counter = snbep_uncore_pci_read_counter,
2280 .enable_event = knl_uncore_imc_enable_event,
2281 .disable_event = snbep_uncore_pci_disable_event,
2284 static struct intel_uncore_type knl_uncore_imc_uclk = {
2288 .perf_ctr_bits = 48,
2289 .fixed_ctr_bits = 48,
2290 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2291 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2292 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2293 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2294 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2295 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2296 .ops = &knl_uncore_imc_ops,
2297 .format_group = &snbep_uncore_format_group,
2300 static struct intel_uncore_type knl_uncore_imc_dclk = {
2304 .perf_ctr_bits = 48,
2305 .fixed_ctr_bits = 48,
2306 .perf_ctr = KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2307 .event_ctl = KNL_MC0_CH0_MSR_PMON_CTL0,
2308 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2309 .fixed_ctr = KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2310 .fixed_ctl = KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2311 .box_ctl = KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2312 .ops = &knl_uncore_imc_ops,
2313 .format_group = &snbep_uncore_format_group,
2316 static struct intel_uncore_type knl_uncore_edc_uclk = {
2320 .perf_ctr_bits = 48,
2321 .fixed_ctr_bits = 48,
2322 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2323 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2324 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2325 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2326 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2327 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2328 .ops = &knl_uncore_imc_ops,
2329 .format_group = &snbep_uncore_format_group,
2332 static struct intel_uncore_type knl_uncore_edc_eclk = {
2336 .perf_ctr_bits = 48,
2337 .fixed_ctr_bits = 48,
2338 .perf_ctr = KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2339 .event_ctl = KNL_EDC0_ECLK_MSR_PMON_CTL0,
2340 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2341 .fixed_ctr = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2342 .fixed_ctl = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2343 .box_ctl = KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2344 .ops = &knl_uncore_imc_ops,
2345 .format_group = &snbep_uncore_format_group,
2348 static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2349 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2350 EVENT_CONSTRAINT_END
2353 static struct intel_uncore_type knl_uncore_m2pcie = {
2357 .perf_ctr_bits = 48,
2358 .constraints = knl_uncore_m2pcie_constraints,
2359 SNBEP_UNCORE_PCI_COMMON_INIT(),
2362 static struct attribute *knl_uncore_irp_formats_attr[] = {
2363 &format_attr_event.attr,
2364 &format_attr_umask.attr,
2365 &format_attr_qor.attr,
2366 &format_attr_edge.attr,
2367 &format_attr_inv.attr,
2368 &format_attr_thresh8.attr,
2372 static const struct attribute_group knl_uncore_irp_format_group = {
2374 .attrs = knl_uncore_irp_formats_attr,
2377 static struct intel_uncore_type knl_uncore_irp = {
2381 .perf_ctr_bits = 48,
2382 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2383 .event_ctl = SNBEP_PCI_PMON_CTL0,
2384 .event_mask = KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2385 .box_ctl = KNL_IRP_PCI_PMON_BOX_CTL,
2386 .ops = &snbep_uncore_pci_ops,
2387 .format_group = &knl_uncore_irp_format_group,
2391 KNL_PCI_UNCORE_MC_UCLK,
2392 KNL_PCI_UNCORE_MC_DCLK,
2393 KNL_PCI_UNCORE_EDC_UCLK,
2394 KNL_PCI_UNCORE_EDC_ECLK,
2395 KNL_PCI_UNCORE_M2PCIE,
2399 static struct intel_uncore_type *knl_pci_uncores[] = {
2400 [KNL_PCI_UNCORE_MC_UCLK] = &knl_uncore_imc_uclk,
2401 [KNL_PCI_UNCORE_MC_DCLK] = &knl_uncore_imc_dclk,
2402 [KNL_PCI_UNCORE_EDC_UCLK] = &knl_uncore_edc_uclk,
2403 [KNL_PCI_UNCORE_EDC_ECLK] = &knl_uncore_edc_eclk,
2404 [KNL_PCI_UNCORE_M2PCIE] = &knl_uncore_m2pcie,
2405 [KNL_PCI_UNCORE_IRP] = &knl_uncore_irp,
2410 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2411 * device type. Prior to KNL, each instance of a PMU device type had a unique
2414 *  PCI Device ID   Uncore PMU Devices
2415 *  ----------------------------------
2416 *  0x7841          MC0 UClk, MC1 UClk
2417 *  0x7843          MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2418 *                  MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2419 *  0x7833          EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2420 *                  EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2421 *  0x7835          EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2422 *                  EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
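 *
 * For example, the two MC UClk instances below share device ID 0x7841 and are
 * told apart only by their PCI device number (10 vs. 11); the
 * UNCORE_PCI_DEV_FULL_DATA() entries appear to encode that device/function
 * location together with the PMU type and instance index.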
2427 static const struct pci_device_id knl_uncore_pci_ids[] = {
2429 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2430 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
2433 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2434 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2436 { /* MC0 DClk CH 0 */
2437 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2438 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2440 { /* MC0 DClk CH 1 */
2441 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2442 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2444 { /* MC0 DClk CH 2 */
2445 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2446 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2448 { /* MC1 DClk CH 0 */
2449 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2450 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2452 { /* MC1 DClk CH 1 */
2453 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2454 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2456 { /* MC1 DClk CH 2 */
2457 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2458 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2461 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2462 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2465 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2466 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2469 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2470 .driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2473 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2474 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
2477 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2478 .driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2481 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2482 .driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2485 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2486 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2489 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2490 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2493 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2494 .driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2497 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2498 .driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2501 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2502 .driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2505 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2506 .driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2509 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2510 .driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2513 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2514 .driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2517 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2518 .driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
2521 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2522 .driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
2525 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2526 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2529 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2530 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2532 { /* end: all zeroes */ }
2535 static struct pci_driver knl_uncore_pci_driver = {
2536 .name = "knl_uncore",
2537 .id_table = knl_uncore_pci_ids,
2540 int knl_uncore_pci_init(void)
2544 /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2545 ret = snb_pci2phy_map_init(0x7814); /* IRP */
2548 ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2551 uncore_pci_uncores = knl_pci_uncores;
2552 uncore_pci_driver = &knl_uncore_pci_driver;
2556 /* end of KNL uncore support */
2558 /* Haswell-EP uncore support */
2559 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2560 &format_attr_event.attr,
2561 &format_attr_umask.attr,
2562 &format_attr_edge.attr,
2563 &format_attr_inv.attr,
2564 &format_attr_thresh5.attr,
2565 &format_attr_filter_tid2.attr,
2566 &format_attr_filter_cid.attr,
2570 static const struct attribute_group hswep_uncore_ubox_format_group = {
2572 .attrs = hswep_uncore_ubox_formats_attr,
2575 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2577 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2578 reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2579 reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
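/*
 * The UBox has a single filter register shared by its counters; the
 * user-supplied filter arrives in attr.config1 and is clamped to the bits
 * defined by HSWEP_U_MSR_PMON_BOX_FILTER_MASK.
 */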
2584 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2585 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2586 .hw_config = hswep_ubox_hw_config,
2587 .get_constraint = uncore_get_constraint,
2588 .put_constraint = uncore_put_constraint,
2591 static struct intel_uncore_type hswep_uncore_ubox = {
2595 .perf_ctr_bits = 44,
2596 .fixed_ctr_bits = 48,
2597 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2598 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2599 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2600 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2601 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2602 .num_shared_regs = 1,
2603 .ops = &hswep_uncore_ubox_ops,
2604 .format_group = &hswep_uncore_ubox_format_group,
2607 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2608 &format_attr_event.attr,
2609 &format_attr_umask.attr,
2610 &format_attr_edge.attr,
2611 &format_attr_tid_en.attr,
2612 &format_attr_thresh8.attr,
2613 &format_attr_filter_tid3.attr,
2614 &format_attr_filter_link2.attr,
2615 &format_attr_filter_state3.attr,
2616 &format_attr_filter_nid2.attr,
2617 &format_attr_filter_opc2.attr,
2618 &format_attr_filter_nc.attr,
2619 &format_attr_filter_c6.attr,
2620 &format_attr_filter_isoc.attr,
2624 static const struct attribute_group hswep_uncore_cbox_format_group = {
2626 .attrs = hswep_uncore_cbox_formats_attr,
2629 static struct event_constraint hswep_uncore_cbox_constraints[] = {
2630 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2631 UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2632 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2633 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2634 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2635 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2636 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2637 EVENT_CONSTRAINT_END
2640 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2641 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2642 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2643 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2644 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2645 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2646 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2647 SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2648 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2649 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2650 SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2651 SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2652 SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2653 SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2654 SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2655 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2656 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2657 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2658 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2659 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2660 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2661 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2662 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2663 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2664 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2665 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2666 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2667 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2668 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2669 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2670 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2671 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2672 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2673 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2674 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2675 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2676 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2677 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2678 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
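/*
 * The low bits of the "fields" argument select which Cbox filter fields are
 * valid: 0x1 = TID, 0x2 = LINK, 0x4 = STATE, 0x8 = NID, and 0x10 pulls in the
 * opcode-related group (OPC/NC/C6/ISOC) as one unit, as the helper below
 * shows.
 */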
2682 static u64 hswep_cbox_filter_mask(int fields)
2686 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2688 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2690 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2692 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2693 if (fields & 0x10) {
2694 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2695 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2696 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2697 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2702 static struct event_constraint *
2703 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2705 return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2708 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2710 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2711 struct extra_reg *er;
2714 for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2715 if (er->event != (event->hw.config & er->config_mask))
2721 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2722 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2723 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2729 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2730 struct perf_event *event)
2732 struct hw_perf_event *hwc = &event->hw;
2733 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2735 if (reg1->idx != EXTRA_REG_NONE) {
2736 u64 filter = uncore_shared_reg_config(box, 0);
2737 wrmsrl(reg1->reg, filter & 0xffffffff);
2738 wrmsrl(reg1->reg + 1, filter >> 32);
2741 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2744 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2745 .init_box = snbep_uncore_msr_init_box,
2746 .disable_box = snbep_uncore_msr_disable_box,
2747 .enable_box = snbep_uncore_msr_enable_box,
2748 .disable_event = snbep_uncore_msr_disable_event,
2749 .enable_event = hswep_cbox_enable_event,
2750 .read_counter = uncore_msr_read_counter,
2751 .hw_config = hswep_cbox_hw_config,
2752 .get_constraint = hswep_cbox_get_constraint,
2753 .put_constraint = snbep_cbox_put_constraint,
2756 static struct intel_uncore_type hswep_uncore_cbox = {
2760 .perf_ctr_bits = 48,
2761 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2762 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2763 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2764 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2765 .msr_offset = HSWEP_CBO_MSR_OFFSET,
2766 .num_shared_regs = 1,
2767 .constraints = hswep_uncore_cbox_constraints,
2768 .ops = &hswep_uncore_cbox_ops,
2769 .format_group = &hswep_uncore_cbox_format_group,
2773 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
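 * (Each loop iteration below appears to re-write the control register with
 * the bits accumulated so far, so no single write sets more than one new bit.)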
2775 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2777 unsigned msr = uncore_msr_box_ctl(box);
2780 u64 init = SNBEP_PMON_BOX_CTL_INT;
2784 for_each_set_bit(i, (unsigned long *)&init, 64) {
2785 flags |= (1ULL << i);
2791 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2792 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2793 .init_box = hswep_uncore_sbox_msr_init_box
2796 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2797 &format_attr_event.attr,
2798 &format_attr_umask.attr,
2799 &format_attr_edge.attr,
2800 &format_attr_tid_en.attr,
2801 &format_attr_inv.attr,
2802 &format_attr_thresh8.attr,
2806 static const struct attribute_group hswep_uncore_sbox_format_group = {
2808 .attrs = hswep_uncore_sbox_formats_attr,
2811 static struct intel_uncore_type hswep_uncore_sbox = {
2815 .perf_ctr_bits = 44,
2816 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
2817 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
2818 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2819 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
2820 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
2821 .ops = &hswep_uncore_sbox_msr_ops,
2822 .format_group = &hswep_uncore_sbox_format_group,
2825 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2827 struct hw_perf_event *hwc = &event->hw;
2828 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2829 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2831 if (ev_sel >= 0xb && ev_sel <= 0xe) {
2832 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2833 reg1->idx = ev_sel - 0xb;
2834 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
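/*
 * Events 0xb-0xe appear to use the PCU band filter: reg1->idx selects which
 * byte of config1 holds the band threshold, and only that byte is kept here.
 */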
2839 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2840 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2841 .hw_config = hswep_pcu_hw_config,
2842 .get_constraint = snbep_pcu_get_constraint,
2843 .put_constraint = snbep_pcu_put_constraint,
2846 static struct intel_uncore_type hswep_uncore_pcu = {
2850 .perf_ctr_bits = 48,
2851 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2852 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2853 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2854 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2855 .num_shared_regs = 1,
2856 .ops = &hswep_uncore_pcu_ops,
2857 .format_group = &snbep_uncore_pcu_format_group,
2860 static struct intel_uncore_type *hswep_msr_uncores[] = {
2868 #define HSWEP_PCU_DID 0x2fc0
2869 #define HSWEP_PCU_CAPID4_OFFET 0x94
2870 #define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3)
2872 static bool hswep_has_limit_sbox(unsigned int device)
2874 struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2880 pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
2881 if (!hswep_get_chop(capid4))
2887 void hswep_uncore_cpu_init(void)
2889 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2890 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2892 /* Detect 6-8 core systems with only two SBOXes */
2893 if (hswep_has_limit_sbox(HSWEP_PCU_DID))
2894 hswep_uncore_sbox.num_boxes = 2;
2896 uncore_msr_uncores = hswep_msr_uncores;
2899 static struct intel_uncore_type hswep_uncore_ha = {
2903 .perf_ctr_bits = 48,
2904 SNBEP_UNCORE_PCI_COMMON_INIT(),
2907 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2908 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
2909 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
2910 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2911 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2912 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2913 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2914 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2915 { /* end: all zeroes */ },
2918 static struct intel_uncore_type hswep_uncore_imc = {
2922 .perf_ctr_bits = 48,
2923 .fixed_ctr_bits = 48,
2924 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2925 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2926 .event_descs = hswep_uncore_imc_events,
2927 SNBEP_UNCORE_PCI_COMMON_INIT(),
2930 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2932 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2934 struct pci_dev *pdev = box->pci_dev;
2935 struct hw_perf_event *hwc = &event->hw;
2938 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2939 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2944 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2945 .init_box = snbep_uncore_pci_init_box,
2946 .disable_box = snbep_uncore_pci_disable_box,
2947 .enable_box = snbep_uncore_pci_enable_box,
2948 .disable_event = ivbep_uncore_irp_disable_event,
2949 .enable_event = ivbep_uncore_irp_enable_event,
2950 .read_counter = hswep_uncore_irp_read_counter,
2953 static struct intel_uncore_type hswep_uncore_irp = {
2957 .perf_ctr_bits = 48,
2958 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2959 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2960 .ops = &hswep_uncore_irp_ops,
2961 .format_group = &snbep_uncore_format_group,
2964 static struct intel_uncore_type hswep_uncore_qpi = {
2968 .perf_ctr_bits = 48,
2969 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2970 .event_ctl = SNBEP_PCI_PMON_CTL0,
2971 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
2972 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2973 .num_shared_regs = 1,
2974 .ops = &snbep_uncore_qpi_ops,
2975 .format_group = &snbep_uncore_qpi_format_group,
2978 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
2979 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2980 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2981 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2982 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
2983 UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
2984 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
2985 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2986 UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
2987 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2988 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2989 UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
2990 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
2991 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2992 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2993 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2994 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2995 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2996 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
2997 EVENT_CONSTRAINT_END
3000 static struct intel_uncore_type hswep_uncore_r2pcie = {
3004 .perf_ctr_bits = 48,
3005 .constraints = hswep_uncore_r2pcie_constraints,
3006 SNBEP_UNCORE_PCI_COMMON_INIT(),
3009 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
3010 UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
3011 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3012 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3013 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3014 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3015 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3016 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3017 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3018 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
3019 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3020 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3021 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3022 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3023 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3024 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3025 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3026 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3027 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3028 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3029 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3030 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3031 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3032 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3033 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3034 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3035 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
3036 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3037 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3038 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3039 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3040 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3041 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3042 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3043 EVENT_CONSTRAINT_END
3046 static struct intel_uncore_type hswep_uncore_r3qpi = {
3050 .perf_ctr_bits = 44,
3051 .constraints = hswep_uncore_r3qpi_constraints,
3052 SNBEP_UNCORE_PCI_COMMON_INIT(),
3056 HSWEP_PCI_UNCORE_HA,
3057 HSWEP_PCI_UNCORE_IMC,
3058 HSWEP_PCI_UNCORE_IRP,
3059 HSWEP_PCI_UNCORE_QPI,
3060 HSWEP_PCI_UNCORE_R2PCIE,
3061 HSWEP_PCI_UNCORE_R3QPI,
3064 static struct intel_uncore_type *hswep_pci_uncores[] = {
3065 [HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha,
3066 [HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc,
3067 [HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp,
3068 [HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi,
3069 [HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
3070 [HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi,
3074 static const struct pci_device_id hswep_uncore_pci_ids[] = {
3075 { /* Home Agent 0 */
3076 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
3077 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
3079 { /* Home Agent 1 */
3080 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
3081 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
3083 { /* MC0 Channel 0 */
3084 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
3085 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
3087 { /* MC0 Channel 1 */
3088 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
3089 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
3091 { /* MC0 Channel 2 */
3092 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
3093 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
3095 { /* MC0 Channel 3 */
3096 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
3097 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
3099 { /* MC1 Channel 0 */
3100 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
3101 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
3103 { /* MC1 Channel 1 */
3104 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
3105 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
3107 { /* MC1 Channel 2 */
3108 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
3109 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
3111 { /* MC1 Channel 3 */
3112 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
3113 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
3116 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
3117 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
3120 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3121 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3124 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3125 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3128 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3129 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3132 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3133 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3135 { /* R3QPI0 Link 0 */
3136 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3137 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3139 { /* R3QPI0 Link 1 */
3140 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3141 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3143 { /* R3QPI1 Link 2 */
3144 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3145 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3147 { /* QPI Port 0 filter */
3148 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3149 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3150 SNBEP_PCI_QPI_PORT0_FILTER),
3152 { /* QPI Port 1 filter */
3153 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3154 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3155 SNBEP_PCI_QPI_PORT1_FILTER),
3157 { /* end: all zeroes */ }
3160 static struct pci_driver hswep_uncore_pci_driver = {
3161 .name = "hswep_uncore",
3162 .id_table = hswep_uncore_pci_ids,
3165 int hswep_uncore_pci_init(void)
3167 int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3170 uncore_pci_uncores = hswep_pci_uncores;
3171 uncore_pci_driver = &hswep_uncore_pci_driver;
3174 /* end of Haswell-EP uncore support */
3176 /* BDX uncore support */
3178 static struct intel_uncore_type bdx_uncore_ubox = {
3182 .perf_ctr_bits = 48,
3183 .fixed_ctr_bits = 48,
3184 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
3185 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
3186 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3187 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3188 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3189 .num_shared_regs = 1,
3190 .ops = &ivbep_uncore_msr_ops,
3191 .format_group = &ivbep_uncore_ubox_format_group,
3194 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3195 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3196 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3197 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3198 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3199 EVENT_CONSTRAINT_END
3202 static struct intel_uncore_type bdx_uncore_cbox = {
3206 .perf_ctr_bits = 48,
3207 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
3208 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
3209 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3210 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
3211 .msr_offset = HSWEP_CBO_MSR_OFFSET,
3212 .num_shared_regs = 1,
3213 .constraints = bdx_uncore_cbox_constraints,
3214 .ops = &hswep_uncore_cbox_ops,
3215 .format_group = &hswep_uncore_cbox_format_group,
3218 static struct intel_uncore_type bdx_uncore_sbox = {
3222 .perf_ctr_bits = 48,
3223 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
3224 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
3225 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3226 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
3227 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
3228 .ops = &hswep_uncore_sbox_msr_ops,
3229 .format_group = &hswep_uncore_sbox_format_group,
3232 #define BDX_MSR_UNCORE_SBOX 3
3234 static struct intel_uncore_type *bdx_msr_uncores[] = {
3242 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
3243 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3244 EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3245 EVENT_CONSTRAINT_END
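/*
 * EVENT_CONSTRAINT(0x80, 0xe, 0x80) reads as: any event with bit 7 set may
 * only be scheduled on counters 1-3 (counter mask 0xe), which is how the
 * missing 'Use Occupancy' support on counter 0 is expressed.
 */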
3248 #define BDX_PCU_DID 0x6fc0
3250 void bdx_uncore_cpu_init(void)
3252 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3253 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3254 uncore_msr_uncores = bdx_msr_uncores;
3256 /* Detect systems with no SBOXes */
3257 if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
3258 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
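/* x86_model 86 is Broadwell-DE, which has no SBOX units. */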
3260 hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3263 static struct intel_uncore_type bdx_uncore_ha = {
3267 .perf_ctr_bits = 48,
3268 SNBEP_UNCORE_PCI_COMMON_INIT(),
3271 static struct intel_uncore_type bdx_uncore_imc = {
3275 .perf_ctr_bits = 48,
3276 .fixed_ctr_bits = 48,
3277 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3278 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3279 .event_descs = hswep_uncore_imc_events,
3280 SNBEP_UNCORE_PCI_COMMON_INIT(),
3283 static struct intel_uncore_type bdx_uncore_irp = {
3287 .perf_ctr_bits = 48,
3288 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3289 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3290 .ops = &hswep_uncore_irp_ops,
3291 .format_group = &snbep_uncore_format_group,
3294 static struct intel_uncore_type bdx_uncore_qpi = {
3298 .perf_ctr_bits = 48,
3299 .perf_ctr = SNBEP_PCI_PMON_CTR0,
3300 .event_ctl = SNBEP_PCI_PMON_CTL0,
3301 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3302 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3303 .num_shared_regs = 1,
3304 .ops = &snbep_uncore_qpi_ops,
3305 .format_group = &snbep_uncore_qpi_format_group,
3308 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3309 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3310 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3311 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3312 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3313 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3314 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3315 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3316 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3317 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3318 EVENT_CONSTRAINT_END
3321 static struct intel_uncore_type bdx_uncore_r2pcie = {
3325 .perf_ctr_bits = 48,
3326 .constraints = bdx_uncore_r2pcie_constraints,
3327 SNBEP_UNCORE_PCI_COMMON_INIT(),
3330 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3331 UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3332 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3333 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3334 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3335 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3336 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3337 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3338 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3339 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3340 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3341 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3342 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3343 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3344 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3345 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3346 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3347 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3348 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3349 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3350 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3351 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3352 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3353 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3354 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3355 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3356 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3357 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3358 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3359 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3360 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3361 EVENT_CONSTRAINT_END
3364 static struct intel_uncore_type bdx_uncore_r3qpi = {
3368 .perf_ctr_bits = 48,
3369 .constraints = bdx_uncore_r3qpi_constraints,
3370 SNBEP_UNCORE_PCI_COMMON_INIT(),
3378 BDX_PCI_UNCORE_R2PCIE,
3379 BDX_PCI_UNCORE_R3QPI,
3382 static struct intel_uncore_type *bdx_pci_uncores[] = {
3383 [BDX_PCI_UNCORE_HA] = &bdx_uncore_ha,
3384 [BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc,
3385 [BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp,
3386 [BDX_PCI_UNCORE_QPI] = &bdx_uncore_qpi,
3387 [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie,
3388 [BDX_PCI_UNCORE_R3QPI] = &bdx_uncore_r3qpi,
3392 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3393 { /* Home Agent 0 */
3394 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3395 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3397 { /* Home Agent 1 */
3398 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3399 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3401 { /* MC0 Channel 0 */
3402 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3403 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3405 { /* MC0 Channel 1 */
3406 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3407 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3409 { /* MC0 Channel 2 */
3410 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3411 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3413 { /* MC0 Channel 3 */
3414 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3415 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3417 { /* MC1 Channel 0 */
3418 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3419 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3421 { /* MC1 Channel 1 */
3422 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3423 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3425 { /* MC1 Channel 2 */
3426 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3427 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3429 { /* MC1 Channel 3 */
3430 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3431 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3434 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3435 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3438 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3439 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3442 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3443 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3446 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3447 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3450 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3451 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3453 { /* R3QPI0 Link 0 */
3454 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3455 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3457 { /* R3QPI0 Link 1 */
3458 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3459 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3461 { /* R3QPI1 Link 2 */
3462 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3463 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3465 { /* QPI Port 0 filter */
3466 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3467 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3468 SNBEP_PCI_QPI_PORT0_FILTER),
3470 { /* QPI Port 1 filter */
3471 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3472 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3473 SNBEP_PCI_QPI_PORT1_FILTER),
3475 { /* QPI Port 2 filter */
3476 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3477 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3478 BDX_PCI_QPI_PORT2_FILTER),
3480 { /* end: all zeroes */ }
3483 static struct pci_driver bdx_uncore_pci_driver = {
3484 .name = "bdx_uncore",
3485 .id_table = bdx_uncore_pci_ids,
3488 int bdx_uncore_pci_init(void)
3490 int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3494 uncore_pci_uncores = bdx_pci_uncores;
3495 uncore_pci_driver = &bdx_uncore_pci_driver;
3499 /* end of BDX uncore support */
3501 /* SKX uncore support */
3503 static struct intel_uncore_type skx_uncore_ubox = {
3507 .perf_ctr_bits = 48,
3508 .fixed_ctr_bits = 48,
3509 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
3510 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
3511 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3512 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3513 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3514 .ops = &ivbep_uncore_msr_ops,
3515 .format_group = &ivbep_uncore_ubox_format_group,
3518 static struct attribute *skx_uncore_cha_formats_attr[] = {
3519 &format_attr_event.attr,
3520 &format_attr_umask.attr,
3521 &format_attr_edge.attr,
3522 &format_attr_tid_en.attr,
3523 &format_attr_inv.attr,
3524 &format_attr_thresh8.attr,
3525 &format_attr_filter_tid4.attr,
3526 &format_attr_filter_state5.attr,
3527 &format_attr_filter_rem.attr,
3528 &format_attr_filter_loc.attr,
3529 &format_attr_filter_nm.attr,
3530 &format_attr_filter_all_op.attr,
3531 &format_attr_filter_not_nm.attr,
3532 &format_attr_filter_opc_0.attr,
3533 &format_attr_filter_opc_1.attr,
3534 &format_attr_filter_nc.attr,
3535 &format_attr_filter_isoc.attr,
3539 static const struct attribute_group skx_uncore_chabox_format_group = {
3541 .attrs = skx_uncore_cha_formats_attr,
3544 static struct event_constraint skx_uncore_chabox_constraints[] = {
3545 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3546 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3547 EVENT_CONSTRAINT_END
3550 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3551 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3552 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3553 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3554 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3555 SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3556 SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3557 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3558 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3559 SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
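/*
 * The SKX filter helper below keeps TID, LINK and STATE as individually
 * selectable fields; the remaining REM/LOC/opcode/NM/NC/ISOC bits appear to
 * be granted together as a single group.
 */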
3563 static u64 skx_cha_filter_mask(int fields)
3568 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3570 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3572 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3574 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3575 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3576 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3577 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3578 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3579 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3580 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3581 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3582 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3587 static struct event_constraint *
3588 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3590 return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3593 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3595 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3596 struct extra_reg *er;
3599 for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3600 if (er->event != (event->hw.config & er->config_mask))
3606 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3607 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3608 reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3614 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3615 /* There is no frz_en for chabox ctl */
3616 .init_box = ivbep_uncore_msr_init_box,
3617 .disable_box = snbep_uncore_msr_disable_box,
3618 .enable_box = snbep_uncore_msr_enable_box,
3619 .disable_event = snbep_uncore_msr_disable_event,
3620 .enable_event = hswep_cbox_enable_event,
3621 .read_counter = uncore_msr_read_counter,
3622 .hw_config = skx_cha_hw_config,
3623 .get_constraint = skx_cha_get_constraint,
3624 .put_constraint = snbep_cbox_put_constraint,
3627 static struct intel_uncore_type skx_uncore_chabox = {
3630 .perf_ctr_bits = 48,
3631 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
3632 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
3633 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3634 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
3635 .msr_offset = HSWEP_CBO_MSR_OFFSET,
3636 .num_shared_regs = 1,
3637 .constraints = skx_uncore_chabox_constraints,
3638 .ops = &skx_uncore_chabox_ops,
3639 .format_group = &skx_uncore_chabox_format_group,
3642 static struct attribute *skx_uncore_iio_formats_attr[] = {
3643 &format_attr_event.attr,
3644 &format_attr_umask.attr,
3645 &format_attr_edge.attr,
3646 &format_attr_inv.attr,
3647 &format_attr_thresh9.attr,
3648 &format_attr_ch_mask.attr,
3649 &format_attr_fc_mask.attr,
3653 static const struct attribute_group skx_uncore_iio_format_group = {
3655 .attrs = skx_uncore_iio_formats_attr,
3658 static struct event_constraint skx_uncore_iio_constraints[] = {
3659 UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3660 UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3661 UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3662 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3663 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3664 UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3665 EVENT_CONSTRAINT_END
3668 static void skx_iio_enable_event(struct intel_uncore_box *box,
3669 struct perf_event *event)
3671 struct hw_perf_event *hwc = &event->hw;
3673 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3676 static struct intel_uncore_ops skx_uncore_iio_ops = {
3677 .init_box = ivbep_uncore_msr_init_box,
3678 .disable_box = snbep_uncore_msr_disable_box,
3679 .enable_box = snbep_uncore_msr_enable_box,
3680 .disable_event = snbep_uncore_msr_disable_event,
3681 .enable_event = skx_iio_enable_event,
3682 .read_counter = uncore_msr_read_counter,
3685 static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
3687 return pmu->type->topology[die].configuration >>
3688 (pmu->pmu_idx * BUS_NUM_STRIDE);
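/*
 * Each IIO stack's root bus number occupies one BUS_NUM_STRIDE-wide slot
 * (presumably one byte) of the per-die SKX_MSR_CPU_BUS_NUMBER value, so the
 * stack for a given pmu_idx is recovered by shifting and truncating to u8.
 */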
3692 pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3693 int die, int zero_bus_pmu)
3695 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3697 return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3701 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3703 /* Root bus 0x00 is valid only for pmu_idx = 0. */
3704 return pmu_iio_mapping_visible(kobj, attr, die, 0);
3707 static ssize_t skx_iio_mapping_show(struct device *dev,
3708 struct device_attribute *attr, char *buf)
3710 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
3711 struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3712 long die = (long)ea->var;
3714 return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment,
3715 skx_iio_stack(pmu, die));
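/*
 * The resulting sysfs string is "<segment>:<root bus>", e.g. a hypothetical
 * "0000:17" for an IIO stack rooted at bus 0x17 on segment 0.
 */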
3718 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3722 if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3723 !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3726 *topology = msr_value;
3731 static int die_to_cpu(int die)
3733 int res = 0, cpu, current_die;
3735 * Using cpus_read_lock() to ensure a cpu cannot go offline while we
3736 * walk cpu_online_mask.
3739 for_each_online_cpu(cpu) {
3740 current_die = topology_logical_die_id(cpu);
3741 if (current_die == die) {
3750 static int skx_iio_get_topology(struct intel_uncore_type *type)
3752 int die, ret = -EPERM;
3754 type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
3756 if (!type->topology)
3759 for (die = 0; die < uncore_max_dies(); die++) {
3760 ret = skx_msr_cpu_bus_read(die_to_cpu(die),
3761 &type->topology[die].configuration);
3765 ret = uncore_die_to_segment(die);
3769 type->topology[die].segment = ret;
3773 kfree(type->topology);
3774 type->topology = NULL;
3780 static struct attribute_group skx_iio_mapping_group = {
3781 .is_visible = skx_iio_mapping_visible,
3784 static const struct attribute_group *skx_iio_attr_update[] = {
3785 &skx_iio_mapping_group,
3790 pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3795 struct attribute **attrs = NULL;
3796 struct dev_ext_attribute *eas = NULL;
3798 ret = type->get_topology(type);
3800 goto clear_attr_update;
3804 /* One extra slot for the terminating NULL. */
3805 attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3809 eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3813 for (die = 0; die < uncore_max_dies(); die++) {
3814 sprintf(buf, "die%ld", die);
3815 sysfs_attr_init(&eas[die].attr.attr);
3816 eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3817 if (!eas[die].attr.attr.name)
3819 eas[die].attr.attr.mode = 0444;
3820 eas[die].attr.show = skx_iio_mapping_show;
3821 eas[die].attr.store = NULL;
3822 eas[die].var = (void *)die;
3823 attrs[die] = &eas[die].attr.attr;
3829 for (; die >= 0; die--)
3830 kfree(eas[die].attr.attr.name);
3833 kfree(type->topology);
3835 type->attr_update = NULL;
3839 static int skx_iio_set_mapping(struct intel_uncore_type *type)
3841 return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
3844 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
3846 struct attribute **attr = skx_iio_mapping_group.attrs;
3851 for (; *attr; attr++)
3852 kfree((*attr)->name);
3853 kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
3854 kfree(skx_iio_mapping_group.attrs);
3855 skx_iio_mapping_group.attrs = NULL;
3856 kfree(type->topology);
3859 static struct intel_uncore_type skx_uncore_iio = {
3863 .perf_ctr_bits = 48,
3864 .event_ctl = SKX_IIO0_MSR_PMON_CTL0,
3865 .perf_ctr = SKX_IIO0_MSR_PMON_CTR0,
3866 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
3867 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
3868 .box_ctl = SKX_IIO0_MSR_PMON_BOX_CTL,
3869 .msr_offset = SKX_IIO_MSR_OFFSET,
3870 .constraints = skx_uncore_iio_constraints,
3871 .ops = &skx_uncore_iio_ops,
3872 .format_group = &skx_uncore_iio_format_group,
3873 .attr_update = skx_iio_attr_update,
3874 .get_topology = skx_iio_get_topology,
3875 .set_mapping = skx_iio_set_mapping,
3876 .cleanup_mapping = skx_iio_cleanup_mapping,
3879 enum perf_uncore_iio_freerunning_type_id {
3880 SKX_IIO_MSR_IOCLK = 0,
3882 SKX_IIO_MSR_UTIL = 2,
3884 SKX_IIO_FREERUNNING_TYPE_MAX,
3888 static struct freerunning_counters skx_iio_freerunning[] = {
3889 [SKX_IIO_MSR_IOCLK] = { 0xa45, 0x1, 0x20, 1, 36 },
3890 [SKX_IIO_MSR_BW] = { 0xb00, 0x1, 0x10, 8, 36 },
3891 [SKX_IIO_MSR_UTIL] = { 0xb08, 0x1, 0x10, 8, 36 },
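/*
 * Illustrative note: each initializer above appears to follow the
 * struct freerunning_counters layout {counter_base, counter_offset,
 * box_offset, num_counters, bits}; e.g. the BW entry describes eight
 * 36-bit free-running counters starting at MSR 0xb00, with a stride of
 * 0x1 between counters and 0x10 between IIO boxes.
 */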
3894 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
3895 /* Free-Running IO CLOCKS Counter */
3896 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
3897 /* Free-Running IIO BANDWIDTH Counters */
3898 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
3899 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
3900 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
3901 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
3902 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
3903 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
3904 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
3905 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
3906 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
3907 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
3908 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
3909 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
3910 INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x24"),
3911 INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
3912 INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
3913 INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x25"),
3914 INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
3915 INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
3916 INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x26"),
3917 INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
3918 INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
3919 INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x27"),
3920 INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
3921 INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
3922 /* Free-running IIO UTILIZATION Counters */
3923 INTEL_UNCORE_EVENT_DESC(util_in_port0, "event=0xff,umask=0x30"),
3924 INTEL_UNCORE_EVENT_DESC(util_out_port0, "event=0xff,umask=0x31"),
3925 INTEL_UNCORE_EVENT_DESC(util_in_port1, "event=0xff,umask=0x32"),
3926 INTEL_UNCORE_EVENT_DESC(util_out_port1, "event=0xff,umask=0x33"),
3927 INTEL_UNCORE_EVENT_DESC(util_in_port2, "event=0xff,umask=0x34"),
3928 INTEL_UNCORE_EVENT_DESC(util_out_port2, "event=0xff,umask=0x35"),
3929 INTEL_UNCORE_EVENT_DESC(util_in_port3, "event=0xff,umask=0x36"),
3930 INTEL_UNCORE_EVENT_DESC(util_out_port3, "event=0xff,umask=0x37"),
3931 { /* end: all zeroes */ },
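/*
 * Illustrative usage (hypothetical command line, not part of this file):
 * a free-running bandwidth counter can be read with something like
 *   perf stat -a -e uncore_iio_free_running_0/bw_in_port0/ -- sleep 1
 * The 3.814697266e-6 scale is 4 / 2^20, i.e. the hardware appears to count
 * in 4-byte units and the scale converts the accumulated count to MiB.
 */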
3934 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
3935 .read_counter = uncore_msr_read_counter,
3936 .hw_config = uncore_freerunning_hw_config,
3939 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
3940 &format_attr_event.attr,
3941 &format_attr_umask.attr,
3945 static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
3947 .attrs = skx_uncore_iio_freerunning_formats_attr,
3950 static struct intel_uncore_type skx_uncore_iio_free_running = {
3951 .name = "iio_free_running",
3954 .num_freerunning_types = SKX_IIO_FREERUNNING_TYPE_MAX,
3955 .freerunning = skx_iio_freerunning,
3956 .ops = &skx_uncore_iio_freerunning_ops,
3957 .event_descs = skx_uncore_iio_freerunning_events,
3958 .format_group = &skx_uncore_iio_freerunning_format_group,
3961 static struct attribute *skx_uncore_formats_attr[] = {
3962 &format_attr_event.attr,
3963 &format_attr_umask.attr,
3964 &format_attr_edge.attr,
3965 &format_attr_inv.attr,
3966 &format_attr_thresh8.attr,
3970 static const struct attribute_group skx_uncore_format_group = {
3972 .attrs = skx_uncore_formats_attr,
3975 static struct intel_uncore_type skx_uncore_irp = {
3979 .perf_ctr_bits = 48,
3980 .event_ctl = SKX_IRP0_MSR_PMON_CTL0,
3981 .perf_ctr = SKX_IRP0_MSR_PMON_CTR0,
3982 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3983 .box_ctl = SKX_IRP0_MSR_PMON_BOX_CTL,
3984 .msr_offset = SKX_IRP_MSR_OFFSET,
3985 .ops = &skx_uncore_iio_ops,
3986 .format_group = &skx_uncore_format_group,
3989 static struct attribute *skx_uncore_pcu_formats_attr[] = {
3990 &format_attr_event.attr,
3991 &format_attr_umask.attr,
3992 &format_attr_edge.attr,
3993 &format_attr_inv.attr,
3994 &format_attr_thresh8.attr,
3995 &format_attr_occ_invert.attr,
3996 &format_attr_occ_edge_det.attr,
3997 &format_attr_filter_band0.attr,
3998 &format_attr_filter_band1.attr,
3999 &format_attr_filter_band2.attr,
4000 &format_attr_filter_band3.attr,
4004 static struct attribute_group skx_uncore_pcu_format_group = {
4006 .attrs = skx_uncore_pcu_formats_attr,
4009 static struct intel_uncore_ops skx_uncore_pcu_ops = {
4010 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4011 .hw_config = hswep_pcu_hw_config,
4012 .get_constraint = snbep_pcu_get_constraint,
4013 .put_constraint = snbep_pcu_put_constraint,
4016 static struct intel_uncore_type skx_uncore_pcu = {
4020 .perf_ctr_bits = 48,
4021 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
4022 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
4023 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
4024 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
4025 .num_shared_regs = 1,
4026 .ops = &skx_uncore_pcu_ops,
4027 .format_group = &skx_uncore_pcu_format_group,
4030 static struct intel_uncore_type *skx_msr_uncores[] = {
4034 &skx_uncore_iio_free_running,
4041 * To determine the number of CHAs, it should read bits 27:0 in the CAPID6
4042 * register, which is located at Device 30, Function 3, Offset 0x9C (PCI device ID 0x2083).
4044 #define SKX_CAPID6 0x9c
4045 #define SKX_CHA_BIT_MASK GENMASK(27, 0)
4047 static int skx_count_chabox(void)
4049 struct pci_dev *dev = NULL;
4052 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4056 pci_read_config_dword(dev, SKX_CAPID6, &val);
4057 val &= SKX_CHA_BIT_MASK;
4060 return hweight32(val);
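/*
 * Illustrative example: CAPID6[27:0] is treated as a presence bitmap, one bit
 * per CHA, so the box count is simply the popcount.  A part with 28 CHAs
 * would read 0x0fffffff here (hweight32 == 28); a 10-CHA part with the low
 * ten bits set would read 0x3ff (hweight32 == 10).  Values are examples only.
 */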
4063 void skx_uncore_cpu_init(void)
4065 skx_uncore_chabox.num_boxes = skx_count_chabox();
4066 uncore_msr_uncores = skx_msr_uncores;
4069 static struct intel_uncore_type skx_uncore_imc = {
4073 .perf_ctr_bits = 48,
4074 .fixed_ctr_bits = 48,
4075 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
4076 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
4077 .event_descs = hswep_uncore_imc_events,
4078 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4079 .event_ctl = SNBEP_PCI_PMON_CTL0,
4080 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4081 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4082 .ops = &ivbep_uncore_pci_ops,
4083 .format_group = &skx_uncore_format_group,
4086 static struct attribute *skx_upi_uncore_formats_attr[] = {
4087 &format_attr_event.attr,
4088 &format_attr_umask_ext.attr,
4089 &format_attr_edge.attr,
4090 &format_attr_inv.attr,
4091 &format_attr_thresh8.attr,
4095 static const struct attribute_group skx_upi_uncore_format_group = {
4097 .attrs = skx_upi_uncore_formats_attr,
4100 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4102 struct pci_dev *pdev = box->pci_dev;
4104 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4105 pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4108 static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
4109 .init_box = skx_upi_uncore_pci_init_box,
4110 .disable_box = snbep_uncore_pci_disable_box,
4111 .enable_box = snbep_uncore_pci_enable_box,
4112 .disable_event = snbep_uncore_pci_disable_event,
4113 .enable_event = snbep_uncore_pci_enable_event,
4114 .read_counter = snbep_uncore_pci_read_counter,
4117 static struct intel_uncore_type skx_uncore_upi = {
4121 .perf_ctr_bits = 48,
4122 .perf_ctr = SKX_UPI_PCI_PMON_CTR0,
4123 .event_ctl = SKX_UPI_PCI_PMON_CTL0,
4124 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4125 .event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
4126 .box_ctl = SKX_UPI_PCI_PMON_BOX_CTL,
4127 .ops = &skx_upi_uncore_pci_ops,
4128 .format_group = &skx_upi_uncore_format_group,
4131 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4133 struct pci_dev *pdev = box->pci_dev;
4135 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4136 pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4139 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
4140 .init_box = skx_m2m_uncore_pci_init_box,
4141 .disable_box = snbep_uncore_pci_disable_box,
4142 .enable_box = snbep_uncore_pci_enable_box,
4143 .disable_event = snbep_uncore_pci_disable_event,
4144 .enable_event = snbep_uncore_pci_enable_event,
4145 .read_counter = snbep_uncore_pci_read_counter,
4148 static struct intel_uncore_type skx_uncore_m2m = {
4152 .perf_ctr_bits = 48,
4153 .perf_ctr = SKX_M2M_PCI_PMON_CTR0,
4154 .event_ctl = SKX_M2M_PCI_PMON_CTL0,
4155 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4156 .box_ctl = SKX_M2M_PCI_PMON_BOX_CTL,
4157 .ops = &skx_m2m_uncore_pci_ops,
4158 .format_group = &skx_uncore_format_group,
4161 static struct event_constraint skx_uncore_m2pcie_constraints[] = {
4162 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4163 EVENT_CONSTRAINT_END
4166 static struct intel_uncore_type skx_uncore_m2pcie = {
4170 .perf_ctr_bits = 48,
4171 .constraints = skx_uncore_m2pcie_constraints,
4172 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4173 .event_ctl = SNBEP_PCI_PMON_CTL0,
4174 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4175 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4176 .ops = &ivbep_uncore_pci_ops,
4177 .format_group = &skx_uncore_format_group,
4180 static struct event_constraint skx_uncore_m3upi_constraints[] = {
4181 UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
4182 UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
4183 UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
4184 UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
4185 UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
4186 UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
4187 UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
4188 UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
4189 EVENT_CONSTRAINT_END
4192 static struct intel_uncore_type skx_uncore_m3upi = {
4196 .perf_ctr_bits = 48,
4197 .constraints = skx_uncore_m3upi_constraints,
4198 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4199 .event_ctl = SNBEP_PCI_PMON_CTL0,
4200 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4201 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4202 .ops = &ivbep_uncore_pci_ops,
4203 .format_group = &skx_uncore_format_group,
4210 SKX_PCI_UNCORE_M2PCIE,
4211 SKX_PCI_UNCORE_M3UPI,
4214 static struct intel_uncore_type *skx_pci_uncores[] = {
4215 [SKX_PCI_UNCORE_IMC] = &skx_uncore_imc,
4216 [SKX_PCI_UNCORE_M2M] = &skx_uncore_m2m,
4217 [SKX_PCI_UNCORE_UPI] = &skx_uncore_upi,
4218 [SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie,
4219 [SKX_PCI_UNCORE_M3UPI] = &skx_uncore_m3upi,
4223 static const struct pci_device_id skx_uncore_pci_ids[] = {
4224 { /* MC0 Channel 0 */
4225 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4226 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
4228 { /* MC0 Channel 1 */
4229 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4230 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
4232 { /* MC0 Channel 2 */
4233 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4234 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
4236 { /* MC1 Channel 0 */
4237 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4238 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
4240 { /* MC1 Channel 1 */
4241 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4242 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
4244 { /* MC1 Channel 2 */
4245 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4246 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
4249 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4250 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
4253 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4254 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
4257 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4258 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
4261 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4262 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
4265 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4266 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
4269 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4270 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
4273 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4274 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
4277 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4278 .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
4281 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4282 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
4284 { /* M3UPI0 Link 0 */
4285 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4286 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
4288 { /* M3UPI0 Link 1 */
4289 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
4290 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
4292 { /* M3UPI1 Link 2 */
4293 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4294 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
4296 { /* end: all zeroes */ }
4300 static struct pci_driver skx_uncore_pci_driver = {
4301 .name = "skx_uncore",
4302 .id_table = skx_uncore_pci_ids,
4305 int skx_uncore_pci_init(void)
4307 /* need to double check pci address */
4308 int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4313 uncore_pci_uncores = skx_pci_uncores;
4314 uncore_pci_driver = &skx_uncore_pci_driver;
4318 /* end of SKX uncore support */
4320 /* SNR uncore support */
4322 static struct intel_uncore_type snr_uncore_ubox = {
4326 .perf_ctr_bits = 48,
4327 .fixed_ctr_bits = 48,
4328 .perf_ctr = SNR_U_MSR_PMON_CTR0,
4329 .event_ctl = SNR_U_MSR_PMON_CTL0,
4330 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4331 .fixed_ctr = SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4332 .fixed_ctl = SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4333 .ops = &ivbep_uncore_msr_ops,
4334 .format_group = &ivbep_uncore_format_group,
4337 static struct attribute *snr_uncore_cha_formats_attr[] = {
4338 &format_attr_event.attr,
4339 &format_attr_umask_ext2.attr,
4340 &format_attr_edge.attr,
4341 &format_attr_tid_en.attr,
4342 &format_attr_inv.attr,
4343 &format_attr_thresh8.attr,
4344 &format_attr_filter_tid5.attr,
4347 static const struct attribute_group snr_uncore_chabox_format_group = {
4349 .attrs = snr_uncore_cha_formats_attr,
4352 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4354 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4356 reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4357 box->pmu->type->msr_offset * box->pmu->pmu_idx;
4358 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4364 static void snr_cha_enable_event(struct intel_uncore_box *box,
4365 struct perf_event *event)
4367 struct hw_perf_event *hwc = &event->hw;
4368 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4370 if (reg1->idx != EXTRA_REG_NONE)
4371 wrmsrl(reg1->reg, reg1->config);
4373 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4376 static struct intel_uncore_ops snr_uncore_chabox_ops = {
4377 .init_box = ivbep_uncore_msr_init_box,
4378 .disable_box = snbep_uncore_msr_disable_box,
4379 .enable_box = snbep_uncore_msr_enable_box,
4380 .disable_event = snbep_uncore_msr_disable_event,
4381 .enable_event = snr_cha_enable_event,
4382 .read_counter = uncore_msr_read_counter,
4383 .hw_config = snr_cha_hw_config,
4386 static struct intel_uncore_type snr_uncore_chabox = {
4390 .perf_ctr_bits = 48,
4391 .event_ctl = SNR_CHA_MSR_PMON_CTL0,
4392 .perf_ctr = SNR_CHA_MSR_PMON_CTR0,
4393 .box_ctl = SNR_CHA_MSR_PMON_BOX_CTL,
4394 .msr_offset = HSWEP_CBO_MSR_OFFSET,
4395 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4396 .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
4397 .ops = &snr_uncore_chabox_ops,
4398 .format_group = &snr_uncore_chabox_format_group,
4401 static struct attribute *snr_uncore_iio_formats_attr[] = {
4402 &format_attr_event.attr,
4403 &format_attr_umask.attr,
4404 &format_attr_edge.attr,
4405 &format_attr_inv.attr,
4406 &format_attr_thresh9.attr,
4407 &format_attr_ch_mask2.attr,
4408 &format_attr_fc_mask2.attr,
4412 static const struct attribute_group snr_uncore_iio_format_group = {
4414 .attrs = snr_uncore_iio_formats_attr,
4418 snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
4420 /* Root bus 0x00 is valid only for pmu_idx = 1. */
4421 return pmu_iio_mapping_visible(kobj, attr, die, 1);
4424 static struct attribute_group snr_iio_mapping_group = {
4425 .is_visible = snr_iio_mapping_visible,
4428 static const struct attribute_group *snr_iio_attr_update[] = {
4429 &snr_iio_mapping_group,
4433 static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
4436 int die, stack_id, ret = -EPERM;
4437 struct pci_dev *dev = NULL;
4439 type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
4441 if (!type->topology)
4444 while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
4445 ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
4447 ret = pcibios_err_to_errno(ret);
4451 die = uncore_pcibus_to_dieid(dev->bus);
4452 stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
4453 if (die < 0 || stack_id >= type->num_boxes) {
4458 /* Convert stack id from SAD_CONTROL to PMON notation. */
4459 stack_id = sad_pmon_mapping[stack_id];
4461 ((u8 *)&(type->topology[die].configuration))[stack_id] = dev->bus->number;
4462 type->topology[die].segment = pci_domain_nr(dev->bus);
4466 kfree(type->topology);
4467 type->topology = NULL;
4474 * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON notation.
4478 SNR_CBDMA_DMI_PMON_ID,
4481 SNR_PCIE_GEN3_PMON_ID
4484 static u8 snr_sad_pmon_mapping[] = {
4485 SNR_CBDMA_DMI_PMON_ID,
4486 SNR_PCIE_GEN3_PMON_ID,
4492 static int snr_iio_get_topology(struct intel_uncore_type *type)
4494 return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
4497 static int snr_iio_set_mapping(struct intel_uncore_type *type)
4499 return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
4502 static struct intel_uncore_type snr_uncore_iio = {
4506 .perf_ctr_bits = 48,
4507 .event_ctl = SNR_IIO_MSR_PMON_CTL0,
4508 .perf_ctr = SNR_IIO_MSR_PMON_CTR0,
4509 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4510 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4511 .box_ctl = SNR_IIO_MSR_PMON_BOX_CTL,
4512 .msr_offset = SNR_IIO_MSR_OFFSET,
4513 .ops = &ivbep_uncore_msr_ops,
4514 .format_group = &snr_uncore_iio_format_group,
4515 .attr_update = snr_iio_attr_update,
4516 .get_topology = snr_iio_get_topology,
4517 .set_mapping = snr_iio_set_mapping,
4518 .cleanup_mapping = skx_iio_cleanup_mapping,
4521 static struct intel_uncore_type snr_uncore_irp = {
4525 .perf_ctr_bits = 48,
4526 .event_ctl = SNR_IRP0_MSR_PMON_CTL0,
4527 .perf_ctr = SNR_IRP0_MSR_PMON_CTR0,
4528 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4529 .box_ctl = SNR_IRP0_MSR_PMON_BOX_CTL,
4530 .msr_offset = SNR_IRP_MSR_OFFSET,
4531 .ops = &ivbep_uncore_msr_ops,
4532 .format_group = &ivbep_uncore_format_group,
4535 static struct intel_uncore_type snr_uncore_m2pcie = {
4539 .perf_ctr_bits = 48,
4540 .event_ctl = SNR_M2PCIE_MSR_PMON_CTL0,
4541 .perf_ctr = SNR_M2PCIE_MSR_PMON_CTR0,
4542 .box_ctl = SNR_M2PCIE_MSR_PMON_BOX_CTL,
4543 .msr_offset = SNR_M2PCIE_MSR_OFFSET,
4544 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4545 .ops = &ivbep_uncore_msr_ops,
4546 .format_group = &ivbep_uncore_format_group,
4549 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4551 struct hw_perf_event *hwc = &event->hw;
4552 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4553 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4555 if (ev_sel >= 0xb && ev_sel <= 0xe) {
4556 reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4557 reg1->idx = ev_sel - 0xb;
4558 reg1->config = event->attr.config1 & (0xff << reg1->idx);
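/*
 * Illustrative note: events 0xb-0xe use the PCU filter MSR; judging by the
 * filter_band0-3 format attributes they appear to be frequency-band style
 * events (as on earlier server uncores) whose band threshold comes from
 * config1.  A hypothetical invocation could look like:
 *   perf stat -a -e uncore_pcu/event=0xb,filter_band0=0x30/ -- sleep 1
 * (event number, band value and PMU name are examples, not from this file).
 */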
4563 static struct intel_uncore_ops snr_uncore_pcu_ops = {
4564 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4565 .hw_config = snr_pcu_hw_config,
4566 .get_constraint = snbep_pcu_get_constraint,
4567 .put_constraint = snbep_pcu_put_constraint,
4570 static struct intel_uncore_type snr_uncore_pcu = {
4574 .perf_ctr_bits = 48,
4575 .perf_ctr = SNR_PCU_MSR_PMON_CTR0,
4576 .event_ctl = SNR_PCU_MSR_PMON_CTL0,
4577 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4578 .box_ctl = SNR_PCU_MSR_PMON_BOX_CTL,
4579 .num_shared_regs = 1,
4580 .ops = &snr_uncore_pcu_ops,
4581 .format_group = &skx_uncore_pcu_format_group,
4584 enum perf_uncore_snr_iio_freerunning_type_id {
4588 SNR_IIO_FREERUNNING_TYPE_MAX,
4591 static struct freerunning_counters snr_iio_freerunning[] = {
4592 [SNR_IIO_MSR_IOCLK] = { 0x1eac, 0x1, 0x10, 1, 48 },
4593 [SNR_IIO_MSR_BW_IN] = { 0x1f00, 0x1, 0x10, 8, 48 },
4596 static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
4597 /* Free-Running IIO CLOCKS Counter */
4598 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
4599 /* Free-Running IIO BANDWIDTH IN Counters */
4600 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
4601 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
4602 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
4603 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
4604 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
4605 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
4606 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
4607 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
4608 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
4609 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
4610 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
4611 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
4612 INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
4613 INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
4614 INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
4615 INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
4616 INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
4617 INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
4618 INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
4619 INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
4620 INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
4621 INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
4622 INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
4623 INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
4624 { /* end: all zeroes */ },
4627 static struct intel_uncore_type snr_uncore_iio_free_running = {
4628 .name = "iio_free_running",
4631 .num_freerunning_types = SNR_IIO_FREERUNNING_TYPE_MAX,
4632 .freerunning = snr_iio_freerunning,
4633 .ops = &skx_uncore_iio_freerunning_ops,
4634 .event_descs = snr_uncore_iio_freerunning_events,
4635 .format_group = &skx_uncore_iio_freerunning_format_group,
4638 static struct intel_uncore_type *snr_msr_uncores[] = {
4645 &snr_uncore_iio_free_running,
4649 void snr_uncore_cpu_init(void)
4651 uncore_msr_uncores = snr_msr_uncores;
4654 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4656 struct pci_dev *pdev = box->pci_dev;
4657 int box_ctl = uncore_pci_box_ctl(box);
4659 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4660 pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4663 static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
4664 .init_box = snr_m2m_uncore_pci_init_box,
4665 .disable_box = snbep_uncore_pci_disable_box,
4666 .enable_box = snbep_uncore_pci_enable_box,
4667 .disable_event = snbep_uncore_pci_disable_event,
4668 .enable_event = snbep_uncore_pci_enable_event,
4669 .read_counter = snbep_uncore_pci_read_counter,
4672 static struct attribute *snr_m2m_uncore_formats_attr[] = {
4673 &format_attr_event.attr,
4674 &format_attr_umask_ext3.attr,
4675 &format_attr_edge.attr,
4676 &format_attr_inv.attr,
4677 &format_attr_thresh8.attr,
4681 static const struct attribute_group snr_m2m_uncore_format_group = {
4683 .attrs = snr_m2m_uncore_formats_attr,
4686 static struct intel_uncore_type snr_uncore_m2m = {
4690 .perf_ctr_bits = 48,
4691 .perf_ctr = SNR_M2M_PCI_PMON_CTR0,
4692 .event_ctl = SNR_M2M_PCI_PMON_CTL0,
4693 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4694 .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
4695 .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
4696 .ops = &snr_m2m_uncore_pci_ops,
4697 .format_group = &snr_m2m_uncore_format_group,
4700 static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
4702 struct pci_dev *pdev = box->pci_dev;
4703 struct hw_perf_event *hwc = &event->hw;
4705 pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
4706 pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
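/*
 * Illustrative note: unlike the common snbep_uncore_pci_enable_event(), this
 * helper writes the event control as two 32-bit PCI config accesses, low
 * dword first, because the PCIe3 control register is 64 bits wide and the
 * extended event fields (see event_mask_ext on snr_uncore_pcie3 below) live
 * in the upper dword.
 */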
4709 static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
4710 .init_box = snr_m2m_uncore_pci_init_box,
4711 .disable_box = snbep_uncore_pci_disable_box,
4712 .enable_box = snbep_uncore_pci_enable_box,
4713 .disable_event = snbep_uncore_pci_disable_event,
4714 .enable_event = snr_uncore_pci_enable_event,
4715 .read_counter = snbep_uncore_pci_read_counter,
4718 static struct intel_uncore_type snr_uncore_pcie3 = {
4722 .perf_ctr_bits = 48,
4723 .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0,
4724 .event_ctl = SNR_PCIE3_PCI_PMON_CTL0,
4725 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
4726 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
4727 .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL,
4728 .ops = &snr_pcie3_uncore_pci_ops,
4729 .format_group = &skx_uncore_iio_format_group,
4734 SNR_PCI_UNCORE_PCIE3,
4737 static struct intel_uncore_type *snr_pci_uncores[] = {
4738 [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m,
4739 [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3,
4743 static const struct pci_device_id snr_uncore_pci_ids[] = {
4745 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
4746 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
4748 { /* end: all zeroes */ }
4751 static struct pci_driver snr_uncore_pci_driver = {
4752 .name = "snr_uncore",
4753 .id_table = snr_uncore_pci_ids,
4756 static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
4758 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
4759 .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
4761 { /* end: all zeroes */ }
4764 static struct pci_driver snr_uncore_pci_sub_driver = {
4765 .name = "snr_uncore_sub",
4766 .id_table = snr_uncore_pci_sub_ids,
4769 int snr_uncore_pci_init(void)
4772 int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4773 SKX_GIDNIDMAP, true);
4778 uncore_pci_uncores = snr_pci_uncores;
4779 uncore_pci_driver = &snr_uncore_pci_driver;
4780 uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
4784 static struct pci_dev *snr_uncore_get_mc_dev(int id)
4786 struct pci_dev *mc_dev = NULL;
4790 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4793 pkg = uncore_pcibus_to_dieid(mc_dev->bus);
4800 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
4801 unsigned int box_ctl, int mem_offset)
4803 struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
4804 struct intel_uncore_type *type = box->pmu->type;
4805 resource_size_t addr;
4811 pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4812 addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4814 pci_read_config_dword(pdev, mem_offset, &pci_dword);
4815 addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4819 box->io_addr = ioremap(addr, type->mmio_map_size);
4820 if (!box->io_addr) {
4821 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
4825 writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
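/*
 * Illustrative arithmetic: the PMON MMIO base is assembled from two PCI
 * config dwords, (BASE & SNR_IMC_MMIO_BASE_MASK) << 23 OR'ed with
 * (MEM & SNR_IMC_MMIO_MEM0_MASK) << 12.  For example, masked values of 0x3
 * and 0x5 would give 0x01800000 | 0x00005000 = 0x01805000 (made-up numbers,
 * shown only to make the shifts concrete).
 */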
4828 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
4830 __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
4831 SNR_IMC_MMIO_MEM0_OFFSET);
4834 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4841 config = readl(box->io_addr);
4842 config |= SNBEP_PMON_BOX_CTL_FRZ;
4843 writel(config, box->io_addr);
4846 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4853 config = readl(box->io_addr);
4854 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4855 writel(config, box->io_addr);
4858 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4859 struct perf_event *event)
4861 struct hw_perf_event *hwc = &event->hw;
4866 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4869 writel(hwc->config | SNBEP_PMON_CTL_EN,
4870 box->io_addr + hwc->config_base);
4873 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4874 struct perf_event *event)
4876 struct hw_perf_event *hwc = &event->hw;
4881 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4884 writel(hwc->config, box->io_addr + hwc->config_base);
4887 static struct intel_uncore_ops snr_uncore_mmio_ops = {
4888 .init_box = snr_uncore_mmio_init_box,
4889 .exit_box = uncore_mmio_exit_box,
4890 .disable_box = snr_uncore_mmio_disable_box,
4891 .enable_box = snr_uncore_mmio_enable_box,
4892 .disable_event = snr_uncore_mmio_disable_event,
4893 .enable_event = snr_uncore_mmio_enable_event,
4894 .read_counter = uncore_mmio_read_counter,
4897 static struct uncore_event_desc snr_uncore_imc_events[] = {
4898 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
4899 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x0f"),
4900 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
4901 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
4902 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
4903 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
4904 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
4905 { /* end: all zeroes */ },
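/*
 * Illustrative note: the 6.103515625e-5 scale is 64 / 2^20, i.e. each
 * cas_count increment appears to represent one 64-byte cache line, and the
 * scale converts the count to MiB.  A hypothetical reading:
 *   perf stat -a -e uncore_imc_0/cas_count_read/ -- sleep 1
 * (PMU instance name is an example, not taken from this file).
 */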
4908 static struct intel_uncore_type snr_uncore_imc = {
4912 .perf_ctr_bits = 48,
4913 .fixed_ctr_bits = 48,
4914 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
4915 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
4916 .event_descs = snr_uncore_imc_events,
4917 .perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
4918 .event_ctl = SNR_IMC_MMIO_PMON_CTL0,
4919 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4920 .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
4921 .mmio_offset = SNR_IMC_MMIO_OFFSET,
4922 .mmio_map_size = SNR_IMC_MMIO_SIZE,
4923 .ops = &snr_uncore_mmio_ops,
4924 .format_group = &skx_uncore_format_group,
4927 enum perf_uncore_snr_imc_freerunning_type_id {
4931 SNR_IMC_FREERUNNING_TYPE_MAX,
4934 static struct freerunning_counters snr_imc_freerunning[] = {
4935 [SNR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
4936 [SNR_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
4939 static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
4940 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
4942 INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
4943 INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
4944 INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
4945 INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
4946 INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
4947 INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
4948 { /* end: all zeroes */ },
4951 static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
4952 .init_box = snr_uncore_mmio_init_box,
4953 .exit_box = uncore_mmio_exit_box,
4954 .read_counter = uncore_mmio_read_counter,
4955 .hw_config = uncore_freerunning_hw_config,
4958 static struct intel_uncore_type snr_uncore_imc_free_running = {
4959 .name = "imc_free_running",
4962 .num_freerunning_types = SNR_IMC_FREERUNNING_TYPE_MAX,
4963 .mmio_map_size = SNR_IMC_MMIO_SIZE,
4964 .freerunning = snr_imc_freerunning,
4965 .ops = &snr_uncore_imc_freerunning_ops,
4966 .event_descs = snr_uncore_imc_freerunning_events,
4967 .format_group = &skx_uncore_iio_freerunning_format_group,
4970 static struct intel_uncore_type *snr_mmio_uncores[] = {
4972 &snr_uncore_imc_free_running,
4976 void snr_uncore_mmio_init(void)
4978 uncore_mmio_uncores = snr_mmio_uncores;
4981 /* end of SNR uncore support */
4983 /* ICX uncore support */
4985 static unsigned icx_cha_msr_offsets[] = {
4986 0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
4987 0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
4988 0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
4989 0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0, 0xe,
4990 0x1c, 0x2a, 0x38, 0x46,
4993 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4995 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4996 bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
4999 reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
5000 icx_cha_msr_offsets[box->pmu->pmu_idx];
5001 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
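/*
 * Illustrative note: on ICX the CHA filter MSR appears to be programmed only
 * when the tid_en bit is set in the event config (tie_en above), and the
 * per-box MSR spacing is irregular, hence the icx_cha_msr_offsets[] lookup
 * table instead of a fixed msr_offset stride.
 */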
5008 static struct intel_uncore_ops icx_uncore_chabox_ops = {
5009 .init_box = ivbep_uncore_msr_init_box,
5010 .disable_box = snbep_uncore_msr_disable_box,
5011 .enable_box = snbep_uncore_msr_enable_box,
5012 .disable_event = snbep_uncore_msr_disable_event,
5013 .enable_event = snr_cha_enable_event,
5014 .read_counter = uncore_msr_read_counter,
5015 .hw_config = icx_cha_hw_config,
5018 static struct intel_uncore_type icx_uncore_chabox = {
5021 .perf_ctr_bits = 48,
5022 .event_ctl = ICX_C34_MSR_PMON_CTL0,
5023 .perf_ctr = ICX_C34_MSR_PMON_CTR0,
5024 .box_ctl = ICX_C34_MSR_PMON_BOX_CTL,
5025 .msr_offsets = icx_cha_msr_offsets,
5026 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
5027 .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
5028 .constraints = skx_uncore_chabox_constraints,
5029 .ops = &icx_uncore_chabox_ops,
5030 .format_group = &snr_uncore_chabox_format_group,
5033 static unsigned icx_msr_offsets[] = {
5034 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
5037 static struct event_constraint icx_uncore_iio_constraints[] = {
5038 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
5039 UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
5040 UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
5041 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
5042 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
5043 EVENT_CONSTRAINT_END
5047 icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
5049 /* Root bus 0x00 is valid only for pmu_idx = 5. */
5050 return pmu_iio_mapping_visible(kobj, attr, die, 5);
5053 static struct attribute_group icx_iio_mapping_group = {
5054 .is_visible = icx_iio_mapping_visible,
5057 static const struct attribute_group *icx_iio_attr_update[] = {
5058 &icx_iio_mapping_group,
5063 * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON notation.
5071 ICX_CBDMA_DMI_PMON_ID
5074 static u8 icx_sad_pmon_mapping[] = {
5075 ICX_CBDMA_DMI_PMON_ID,
5083 static int icx_iio_get_topology(struct intel_uncore_type *type)
5085 return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
5088 static int icx_iio_set_mapping(struct intel_uncore_type *type)
5090 return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
5093 static struct intel_uncore_type icx_uncore_iio = {
5097 .perf_ctr_bits = 48,
5098 .event_ctl = ICX_IIO_MSR_PMON_CTL0,
5099 .perf_ctr = ICX_IIO_MSR_PMON_CTR0,
5100 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5101 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
5102 .box_ctl = ICX_IIO_MSR_PMON_BOX_CTL,
5103 .msr_offsets = icx_msr_offsets,
5104 .constraints = icx_uncore_iio_constraints,
5105 .ops = &skx_uncore_iio_ops,
5106 .format_group = &snr_uncore_iio_format_group,
5107 .attr_update = icx_iio_attr_update,
5108 .get_topology = icx_iio_get_topology,
5109 .set_mapping = icx_iio_set_mapping,
5110 .cleanup_mapping = skx_iio_cleanup_mapping,
5113 static struct intel_uncore_type icx_uncore_irp = {
5117 .perf_ctr_bits = 48,
5118 .event_ctl = ICX_IRP0_MSR_PMON_CTL0,
5119 .perf_ctr = ICX_IRP0_MSR_PMON_CTR0,
5120 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5121 .box_ctl = ICX_IRP0_MSR_PMON_BOX_CTL,
5122 .msr_offsets = icx_msr_offsets,
5123 .ops = &ivbep_uncore_msr_ops,
5124 .format_group = &ivbep_uncore_format_group,
5127 static struct event_constraint icx_uncore_m2pcie_constraints[] = {
5128 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
5129 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
5130 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
5131 EVENT_CONSTRAINT_END
5134 static struct intel_uncore_type icx_uncore_m2pcie = {
5138 .perf_ctr_bits = 48,
5139 .event_ctl = ICX_M2PCIE_MSR_PMON_CTL0,
5140 .perf_ctr = ICX_M2PCIE_MSR_PMON_CTR0,
5141 .box_ctl = ICX_M2PCIE_MSR_PMON_BOX_CTL,
5142 .msr_offsets = icx_msr_offsets,
5143 .constraints = icx_uncore_m2pcie_constraints,
5144 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5145 .ops = &ivbep_uncore_msr_ops,
5146 .format_group = &ivbep_uncore_format_group,
5149 enum perf_uncore_icx_iio_freerunning_type_id {
5153 ICX_IIO_FREERUNNING_TYPE_MAX,
5156 static unsigned icx_iio_clk_freerunning_box_offsets[] = {
5157 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
5160 static unsigned icx_iio_bw_freerunning_box_offsets[] = {
5161 0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
5164 static struct freerunning_counters icx_iio_freerunning[] = {
5165 [ICX_IIO_MSR_IOCLK] = { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
5166 [ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
5169 static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
5170 /* Free-Running IIO CLOCKS Counter */
5171 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
5172 /* Free-Running IIO BANDWIDTH IN Counters */
5173 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
5174 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
5175 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
5176 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
5177 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
5178 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
5179 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
5180 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
5181 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
5182 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
5183 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
5184 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
5185 INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
5186 INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
5187 INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
5188 INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
5189 INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
5190 INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
5191 INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
5192 INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
5193 INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
5194 INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
5195 INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
5196 INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
5197 { /* end: all zeroes */ },
5200 static struct intel_uncore_type icx_uncore_iio_free_running = {
5201 .name = "iio_free_running",
5204 .num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX,
5205 .freerunning = icx_iio_freerunning,
5206 .ops = &skx_uncore_iio_freerunning_ops,
5207 .event_descs = icx_uncore_iio_freerunning_events,
5208 .format_group = &skx_uncore_iio_freerunning_format_group,
5211 static struct intel_uncore_type *icx_msr_uncores[] = {
5218 &icx_uncore_iio_free_running,
5223 * To determine the number of CHAs, read the CAPID6 (low) and CAPID7 (high)
5224 * registers, which are located at Device 30, Function 3.
5226 #define ICX_CAPID6 0x9c
5227 #define ICX_CAPID7 0xa0
5229 static u64 icx_count_chabox(void)
5231 struct pci_dev *dev = NULL;
5234 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5238 pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
5239 pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5242 return hweight64(caps);
5245 void icx_uncore_cpu_init(void)
5247 u64 num_boxes = icx_count_chabox();
5249 if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
5251 icx_uncore_chabox.num_boxes = num_boxes;
5252 uncore_msr_uncores = icx_msr_uncores;
5255 static struct intel_uncore_type icx_uncore_m2m = {
5259 .perf_ctr_bits = 48,
5260 .perf_ctr = SNR_M2M_PCI_PMON_CTR0,
5261 .event_ctl = SNR_M2M_PCI_PMON_CTL0,
5262 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5263 .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
5264 .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
5265 .ops = &snr_m2m_uncore_pci_ops,
5266 .format_group = &snr_m2m_uncore_format_group,
5269 static struct attribute *icx_upi_uncore_formats_attr[] = {
5270 &format_attr_event.attr,
5271 &format_attr_umask_ext4.attr,
5272 &format_attr_edge.attr,
5273 &format_attr_inv.attr,
5274 &format_attr_thresh8.attr,
5278 static const struct attribute_group icx_upi_uncore_format_group = {
5280 .attrs = icx_upi_uncore_formats_attr,
5283 static struct intel_uncore_type icx_uncore_upi = {
5287 .perf_ctr_bits = 48,
5288 .perf_ctr = ICX_UPI_PCI_PMON_CTR0,
5289 .event_ctl = ICX_UPI_PCI_PMON_CTL0,
5290 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5291 .event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
5292 .box_ctl = ICX_UPI_PCI_PMON_BOX_CTL,
5293 .ops = &skx_upi_uncore_pci_ops,
5294 .format_group = &icx_upi_uncore_format_group,
5297 static struct event_constraint icx_uncore_m3upi_constraints[] = {
5298 UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
5299 UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
5300 UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
5301 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
5302 UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
5303 UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
5304 UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
5305 UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
5306 EVENT_CONSTRAINT_END
5309 static struct intel_uncore_type icx_uncore_m3upi = {
5313 .perf_ctr_bits = 48,
5314 .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0,
5315 .event_ctl = ICX_M3UPI_PCI_PMON_CTL0,
5316 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5317 .box_ctl = ICX_M3UPI_PCI_PMON_BOX_CTL,
5318 .constraints = icx_uncore_m3upi_constraints,
5319 .ops = &ivbep_uncore_pci_ops,
5320 .format_group = &skx_uncore_format_group,
5326 ICX_PCI_UNCORE_M3UPI,
5329 static struct intel_uncore_type *icx_pci_uncores[] = {
5330 [ICX_PCI_UNCORE_M2M] = &icx_uncore_m2m,
5331 [ICX_PCI_UNCORE_UPI] = &icx_uncore_upi,
5332 [ICX_PCI_UNCORE_M3UPI] = &icx_uncore_m3upi,
5336 static const struct pci_device_id icx_uncore_pci_ids[] = {
5338 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5339 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
5342 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5343 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
5346 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5347 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
5350 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5351 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
5354 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5355 .driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
5358 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5359 .driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
5362 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5363 .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
5365 { /* M3UPI Link 0 */
5366 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5367 .driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
5369 { /* M3UPI Link 1 */
5370 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5371 .driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
5373 { /* M3UPI Link 2 */
5374 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5375 .driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
5377 { /* end: all zeroes */ }
5380 static struct pci_driver icx_uncore_pci_driver = {
5381 .name = "icx_uncore",
5382 .id_table = icx_uncore_pci_ids,
5385 int icx_uncore_pci_init(void)
5388 int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5389 SKX_GIDNIDMAP, true);
5394 uncore_pci_uncores = icx_pci_uncores;
5395 uncore_pci_driver = &icx_uncore_pci_driver;
5399 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5401 unsigned int box_ctl = box->pmu->type->box_ctl +
5402 box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5403 int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5404 SNR_IMC_MMIO_MEM0_OFFSET;
5406 __snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
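/*
 * Illustrative arithmetic: pmu_idx is split into a channel index
 * (idx % ICX_NUMBER_IMC_CHN) within a memory controller and a controller
 * index (idx / ICX_NUMBER_IMC_CHN).  Assuming two channels per controller,
 * pmu_idx 5 would address channel 1 of controller 2, with
 * mem_offset = 2 * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET.
 * The channel count here is an assumption, not restated from this file.
 */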
5409 static struct intel_uncore_ops icx_uncore_mmio_ops = {
5410 .init_box = icx_uncore_imc_init_box,
5411 .exit_box = uncore_mmio_exit_box,
5412 .disable_box = snr_uncore_mmio_disable_box,
5413 .enable_box = snr_uncore_mmio_enable_box,
5414 .disable_event = snr_uncore_mmio_disable_event,
5415 .enable_event = snr_uncore_mmio_enable_event,
5416 .read_counter = uncore_mmio_read_counter,
5419 static struct intel_uncore_type icx_uncore_imc = {
5423 .perf_ctr_bits = 48,
5424 .fixed_ctr_bits = 48,
5425 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
5426 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
5427 .event_descs = hswep_uncore_imc_events,
5428 .perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
5429 .event_ctl = SNR_IMC_MMIO_PMON_CTL0,
5430 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5431 .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
5432 .mmio_offset = SNR_IMC_MMIO_OFFSET,
5433 .mmio_map_size = SNR_IMC_MMIO_SIZE,
5434 .ops = &icx_uncore_mmio_ops,
5435 .format_group = &skx_uncore_format_group,
5438 enum perf_uncore_icx_imc_freerunning_type_id {
5443 ICX_IMC_FREERUNNING_TYPE_MAX,
5446 static struct freerunning_counters icx_imc_freerunning[] = {
5447 [ICX_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
5448 [ICX_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
5449 [ICX_IMC_DDRT] = { 0x22a0, 0x8, 0, 2, 48 },
5452 static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
5453 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
5455 INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
5456 INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
5457 INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
5458 INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
5459 INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
5460 INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
5462 INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"),
5463 INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "6.103515625e-5"),
5464 INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"),
5465 INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"),
5466 INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "6.103515625e-5"),
5467 INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"),
5468 { /* end: all zeroes */ },
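/*
 * Illustrative note: the ddrt_read/ddrt_write descriptors above appear to
 * account DDR-T (Intel Optane persistent memory) traffic separately from the
 * regular DDR read/write counters; all four share the same 64-bytes-per-count
 * scale (6.103515625e-5 MiB).
 */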
5471 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5473 int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5474 SNR_IMC_MMIO_MEM0_OFFSET;
5476 __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
5479 static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
5480 .init_box = icx_uncore_imc_freerunning_init_box,
5481 .exit_box = uncore_mmio_exit_box,
5482 .read_counter = uncore_mmio_read_counter,
5483 .hw_config = uncore_freerunning_hw_config,
5486 static struct intel_uncore_type icx_uncore_imc_free_running = {
5487 .name = "imc_free_running",
5490 .num_freerunning_types = ICX_IMC_FREERUNNING_TYPE_MAX,
5491 .mmio_map_size = SNR_IMC_MMIO_SIZE,
5492 .freerunning = icx_imc_freerunning,
5493 .ops = &icx_uncore_imc_freerunning_ops,
5494 .event_descs = icx_uncore_imc_freerunning_events,
5495 .format_group = &skx_uncore_iio_freerunning_format_group,
5498 static struct intel_uncore_type *icx_mmio_uncores[] = {
5500 &icx_uncore_imc_free_running,
5504 void icx_uncore_mmio_init(void)
5506 uncore_mmio_uncores = icx_mmio_uncores;
5509 /* end of ICX uncore support */