/*
 * Linux kernel source: arch/x86/events/intel/uncore_snbep.c
 * (snapshot taken at the 'regulator-v4.8' merge of the sfrench/cifs-2.6 tree)
 */
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)	/* reset the box's control registers */
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)	/* reset the box's counter registers */
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)	/* freeze all counters in the box */
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)	/* allow the FRZ bit to take effect */
/* Box init value: reset controls and counters, arm the freeze mechanism. */
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/*
 * SNB-EP event control: per-counter control register layout.  The bit
 * positions match the sysfs format attributes defined further down
 * (event=0-7, umask=8-15, edge=18, inv=23, thresh=24-31).
 * "TRESH" (sic) is the historical spelling used throughout this file.
 */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff	/* event select, bits 0-7 */
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00	/* unit mask, bits 8-15 */
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)	/* extra event-select bit (QPI) */
#define SNBEP_PMON_CTL_EN		(1 << 22)	/* counter enable */
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000	/* threshold, bits 24-31 */
/* All config bits userspace may set on a generic SNB-EP counter. */
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control: the Ubox threshold field is only 5 bits wide */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)	/* Cbox: enable TID filtering */
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/*
 * SNB-EP PCU event control.  The PCU control register has no umask field;
 * the occupancy-select field (bits 14-15) and the occupancy invert/edge
 * bits (30/31) take its place, and the threshold is 5 bits wide.
 */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* QPI counters additionally honour the extended event-select bit (21). */
#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register (offsets into PCI config space) */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register ("CHy" = per-channel instance) */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register ("Py" = per-port instance) */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register (MSR addresses) */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register (box 0; later boxes are at +n*SNBEP_CBO_MSR_OFFSET) */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

/* SNB-EP Cbox filter register fields */
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

/*
 * Build an extra_reg table entry that routes event @e (matched under config
 * mask @m, with shared-register allocation index @i) to the Cbox filter
 * register.
 */
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd
111
/* IVBEP event control: IVB's box init resets but does not set FRZ_EN. */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
/* IVB's generic raw-event mask drops the INVERT bit relative to SNB-EP. */
#define IVBEP_PMON_RAW_EVENT_MASK		(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox: global freeze/unfreeze control MSR */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK		(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* IVB Cbox filter fields (the IVB filter is a full 64-bit register) */
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent: adds a queue-occupancy reset bit to the event mask */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK		\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU (no INVERT bit, unlike the SNB-EP PCU mask) */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* Extract field number @i of width @n bits from @x, preserving @x's type. */
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
162
/* Haswell-EP Ubox (MSR addresses) */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

/* HSW-EP Ubox filter: 1-bit TID enable plus a 5-bit core ID */
#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo (box 0; later boxes are at +n*HSWEP_CBO_MSR_OFFSET) */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL			0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10


/* HSW-EP Cbox filter fields (64-bit; wider TID/STATE than IVB) */
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL			0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
						SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA (caching/home agent; successor to the Cbox) */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK (counters are split into _LOW/_HIGH MSR pairs) */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU: 7-bit event select, occupancy-counter select bit, 6-bit thresh */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
266
/*
 * sysfs "format" attribute definitions.  Each entry maps a user-visible
 * field name to the bit range of the perf_event_attr config word(s) it
 * occupies ("config", "config1", "config2").  Multiple widths exist for
 * the same logical field (e.g. thresh5/thresh6/thresh8, filter_tid..4)
 * because field widths differ between box types and CPU generations.
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
/* Box-filter fields live in config1; match/mask (QPI) in config1/config2. */
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
328
329 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
330 {
331         struct pci_dev *pdev = box->pci_dev;
332         int box_ctl = uncore_pci_box_ctl(box);
333         u32 config = 0;
334
335         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
336                 config |= SNBEP_PMON_BOX_CTL_FRZ;
337                 pci_write_config_dword(pdev, box_ctl, config);
338         }
339 }
340
341 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
342 {
343         struct pci_dev *pdev = box->pci_dev;
344         int box_ctl = uncore_pci_box_ctl(box);
345         u32 config = 0;
346
347         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
348                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
349                 pci_write_config_dword(pdev, box_ctl, config);
350         }
351 }
352
/* Start one event: program its control register with the enable bit set. */
static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
360
/* Stop one event: rewrite its control register without SNBEP_PMON_CTL_EN. */
static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}
368
/*
 * Read a 64-bit counter image as two 32-bit PCI config-space reads: the
 * low dword into the low half of @count, the high dword into the high half.
 * The (u32 *) casts rely on x86 being little-endian.  Failed reads are
 * ignored and leave the corresponding half of @count as zero.
 */
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}
380
/*
 * Initialize a PCI box: reset controls/counters and arm the freeze
 * mechanism (SNBEP_PMON_BOX_CTL_INT).
 */
static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}
388
389 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
390 {
391         u64 config;
392         unsigned msr;
393
394         msr = uncore_msr_box_ctl(box);
395         if (msr) {
396                 rdmsrl(msr, config);
397                 config |= SNBEP_PMON_BOX_CTL_FRZ;
398                 wrmsrl(msr, config);
399         }
400 }
401
402 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
403 {
404         u64 config;
405         unsigned msr;
406
407         msr = uncore_msr_box_ctl(box);
408         if (msr) {
409                 rdmsrl(msr, config);
410                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
411                 wrmsrl(msr, config);
412         }
413 }
414
/*
 * Start one MSR-based event.  If the event uses an extra (filter) register,
 * program that first from the box's shared-register state, then write the
 * event control with the enable bit set.
 */
static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
425
/* Stop one MSR-based event: rewrite the control MSR without the enable bit. */
static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}
433
434 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
435 {
436         unsigned msr = uncore_msr_box_ctl(box);
437
438         if (msr)
439                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
440 }
441
/* Generic SNB-EP counter format: 8-bit event/umask/thresh, edge, invert. */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox format: same as generic but with a 5-bit threshold field. */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* Cbox format: adds tid_en and the TID/NID/STATE/OPC filter fields. */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

/* PCU format: occupancy select/invert/edge plus four frequency-band filters. */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI format: extended event select plus packet match/mask fields. */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

/*
 * Pre-defined IMC events exported via sysfs.  The cas_count scale of
 * 6.103515625e-5 equals 64/2^20, i.e. one 64-byte transfer per CAS
 * command reported in MiB (presumably one cache line per CAS — matches
 * the "MiB" unit below).
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

/* Pre-defined QPI events (event > 0xff uses the extended select bit). */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

/* sysfs "format" groups wrapping the attribute arrays above. */
static struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
559
/*
 * Common MSR box ops.  The __ variant omits ->init_box so users of the
 * macro can supply their own; the plain variant adds the SNB-EP init.
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box		\

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/* Common PCI box ops; ->enable_event is supplied by each uncore type. */
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter
581
582 static struct intel_uncore_ops snbep_uncore_pci_ops = {
583         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
584         .enable_event   = snbep_uncore_pci_enable_event,        \
585 };
586
/*
 * Cbox counter constraints.  UNCORE_EVENT_CONSTRAINT(event, cmask)
 * restricts the given event code to the counters named in @cmask
 * (presumably bit n selects counter n, the usual event_constraint
 * encoding — e.g. 0x1 = counter 0 only, 0x3 = counters 0-1).
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	/* 0x1f's mask overlaps other constraints; needs the OVERLAP variant. */
	EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

/* R2PCIe counter constraints. */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

/* R3QPI counter constraints. */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
662
/*
 * SNB-EP Ubox type description: one box with two 44-bit general-purpose
 * counters plus a 48-bit fixed (uncore clock) counter, all MSR-based.
 */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
677
/*
 * Cbox events that require programming the shared filter register.
 * Each entry maps an event+umask value (matched under the given config
 * mask) to a shared-register allocation index; the idx values feed the
 * per-field reference counting in snbep_cbox_{get,put}_constraint().
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
706
707 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
708 {
709         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
710         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
711         int i;
712
713         if (uncore_box_is_fake(box))
714                 return;
715
716         for (i = 0; i < 5; i++) {
717                 if (reg1->alloc & (0x1 << i))
718                         atomic_sub(1 << (i * 6), &er->ref);
719         }
720         reg1->alloc = 0;
721 }
722
/*
 * Try to acquire the C-box filter-register fields an event needs.  The
 * filter register is shared by all counters of a box, so a 6-bit slice of
 * er->ref counts users per field; a field may be shared only when both
 * users program identical filter bits.  Returns NULL on success (no
 * scheduling constraint) or the empty constraint when the fields cannot
 * be acquired.  cbox_filter_mask translates field flags to register bits
 * (SNB-EP vs IVB-EP layouts differ).
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	/* Event uses no filter fields: nothing to arbitrate. */
	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		/* Skip fields the event does not use ... */
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* ... and fields already allocated on a real box. */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/*
		 * Take the field if it is free (refcount slice is zero) or
		 * the current contents already match what we want to program.
		 */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	/* i < 5 means the loop broke on a conflicting field. */
	if (i < 5)
		goto fail;

	/* Record ownership only on real boxes (fake ones are dry runs). */
	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* Undo the references taken before the conflict was hit. */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
769
770 static u64 snbep_cbox_filter_mask(int fields)
771 {
772         u64 mask = 0;
773
774         if (fields & 0x1)
775                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
776         if (fields & 0x2)
777                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
778         if (fields & 0x4)
779                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
780         if (fields & 0x8)
781                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
782
783         return mask;
784 }
785
/* SNB-EP C-box constraint hook: shared logic with the SNB-EP field layout. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
791
792 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
793 {
794         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
795         struct extra_reg *er;
796         int idx = 0;
797
798         for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
799                 if (er->event != (event->hw.config & er->config_mask))
800                         continue;
801                 idx |= er->idx;
802         }
803
804         if (idx) {
805                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
806                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
807                 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
808                 reg1->idx = idx;
809         }
810         return 0;
811 }
812
/* C-box ops: common MSR accessors plus shared filter-register arbitration. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
819
/*
 * SNB-EP C-box (LLC coherence engine) PMON: up to 8 boxes (clamped to the
 * core count in snbep_uncore_cpu_init()), 4 counters each, one shared
 * filter register per box.
 */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
835
836 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
837 {
838         struct hw_perf_event *hwc = &event->hw;
839         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
840         u64 config = reg1->config;
841
842         if (new_idx > reg1->idx)
843                 config <<= 8 * (new_idx - reg1->idx);
844         else
845                 config >>= 8 * (reg1->idx - new_idx);
846
847         if (modify) {
848                 hwc->config += new_idx - reg1->idx;
849                 reg1->config = config;
850                 reg1->idx = new_idx;
851         }
852         return config;
853 }
854
/*
 * Acquire one 8-bit slot of the shared PCU filter register for a
 * frequency-band occupancy event.  If the event's preferred band slot is
 * taken with a different value, rotate through the other three slots
 * (rewriting the event via snbep_pcu_alter_er()) before giving up.
 * Returns NULL on success, the empty constraint when no slot is free.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* No filter needed, or already allocated on a real box: done. */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* Take the slot if unused, or if it already holds the same value. */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* Try the next of the four band slots, wrapping around. */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		/* All four slots conflict: event cannot be scheduled now. */
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* Commit the rotation and remember we hold a reference. */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
896
897 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
898 {
899         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
900         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
901
902         if (uncore_box_is_fake(box) || !reg1->alloc)
903                 return;
904
905         atomic_sub(1 << (reg1->idx * 8), &er->ref);
906         reg1->alloc = 0;
907 }
908
909 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
910 {
911         struct hw_perf_event *hwc = &event->hw;
912         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
913         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
914
915         if (ev_sel >= 0xb && ev_sel <= 0xe) {
916                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
917                 reg1->idx = ev_sel - 0xb;
918                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
919         }
920         return 0;
921 }
922
/* PCU ops: common MSR accessors plus band-filter slot arbitration. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
929
/*
 * SNB-EP PCU (power control unit) PMON: one box, 4 counters, one shared
 * filter register holding the four frequency-band thresholds.
 */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
943
/* All MSR-based SNB-EP uncore PMU types; NULL-terminated. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
950
/* Register the SNB-EP MSR uncore types with the generic uncore core. */
void snbep_uncore_cpu_init(void)
{
	/* There is one C-box per core; clamp to the detected core count. */
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}
957
/* Slot indices into uncore_extra_pci_dev[pkg].dev[] for auxiliary devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	HSWEP_PCI_PCU_3,
};
963
964 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
965 {
966         struct hw_perf_event *hwc = &event->hw;
967         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
968         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
969
970         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
971                 reg1->idx = 0;
972                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
973                 reg1->config = event->attr.config1;
974                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
975                 reg2->config = event->attr.config2;
976         }
977         return 0;
978 }
979
/*
 * Enable a QPI event: program the packet match/mask filters (which live
 * on a separate per-port PCI "filter" device) if the event uses them,
 * then set the enable bit in the counter's control register.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* Look up this port's filter device by package and port. */
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int pkg = topology_phys_to_logical_pkg(box->pci_phys_id);
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];

		/* Silently skip filtering if the device was not discovered. */
		if (filter_pdev) {
			/* 64-bit match/mask values go out as two dwords each. */
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1006
/* QPI ops: common PCI accessors, custom enable/config for packet filters. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
1014
/* Register layout/ops shared by all plain SNB-EP PCI uncore types. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1022
/* SNB-EP Home Agent PMON: one PCI box with four 48-bit counters. */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1030
/*
 * SNB-EP IMC PMON: one PCI box per memory channel (4), each with four
 * general counters plus a fixed DCLK counter.
 */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1042
/*
 * SNB-EP QPI PMON: one PCI box per link (2); uses its own ops/format to
 * support the packet match/mask filter devices.
 */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};
1057
1058
/* SNB-EP R2PCIe ring-to-PCIe interface PMON: one box, 44-bit counters. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1067
/* SNB-EP R3QPI ring-to-QPI interface PMON: one box per link (2). */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1076
/* Indices into snbep_pci_uncores[], encoded in PCI driver_data. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};
1084
/* All PCI-based SNB-EP uncore PMU types, indexed by the enum above. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1093
/*
 * PCI IDs of the SNB-EP uncore PMON devices.  driver_data encodes the
 * uncore type index and box number (or the extra-device slot for the QPI
 * filter devices, which carry no counters themselves).
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1147
/* Probe-less driver stub: the uncore core matches devices via id_table. */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1152
/*
 * Build the PCI bus number -> physical socket id mapping, keyed off the
 * per-socket UBOX device (@devid).  Returns 0 on success or a negative
 * errno (PCI read failure or map allocation failure).
 */
static int snbep_pci2phy_map_init(int devid)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	/* One iteration per UBOX device, i.e. per socket. */
	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, 0x40, &config);
		if (err)
			break;
		/*
		 * NOTE(review): the whole dword is used as the node id; if
		 * this register carries other fields a mask may be needed —
		 * confirm against the uncore programming guide.
		 */
		nodeid = config;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, 0x54, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			/* Walk buses downward, propagating the last id seen. */
			for (bus = 255; bus >= 0; bus--) {
				if (map->pbus_to_physid[bus] >= 0)
					i = map->pbus_to_physid[bus];
				else
					map->pbus_to_physid[bus] = i;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	/* Drops the reference still held if the loop broke out early. */
	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1224
1225 int snbep_uncore_pci_init(void)
1226 {
1227         int ret = snbep_pci2phy_map_init(0x3ce0);
1228         if (ret)
1229                 return ret;
1230         uncore_pci_uncores = snbep_pci_uncores;
1231         uncore_pci_driver = &snbep_uncore_pci_driver;
1232         return 0;
1233 }
1234 /* end of Sandy Bridge-EP uncore support */
1235
1236 /* IvyTown uncore support */
1237 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1238 {
1239         unsigned msr = uncore_msr_box_ctl(box);
1240         if (msr)
1241                 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1242 }
1243
1244 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1245 {
1246         struct pci_dev *pdev = box->pci_dev;
1247
1248         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1249 }
1250
/* Shared MSR ops: IVB-EP init_box, remaining hooks reused from SNB-EP. */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter
1258
/* Default IVB-EP MSR ops (no extra hooks beyond the common set). */
static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
1262
/* Default IVB-EP PCI ops: IVB-EP init_box, SNB-EP everything else. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1271
/* Register layout/ops shared by all plain IVB-EP PCI uncore types. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,			\
	.format_group	= &ivbep_uncore_format_group
1279
/* Common IVB-EP PMON control fields exposed via the sysfs "format" dir. */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox variant: only a 5-bit threshold field. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* C-box variant: adds tid_en and the box filter fields (config1). */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU variant: occupancy sub-event controls and frequency-band filters. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI variant: extended event select plus packet match/mask fields. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1354
/* sysfs "format" groups wrapping the attribute lists above. */
static struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1379
/* IVB-EP Ubox PMON: same register layout as SNB-EP, IVB-EP event mask. */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1394
/*
 * IVB-EP C-box events that require filter-register fields.  Same matching
 * scheme as the SNB-EP table, but the field flags are interpreted by
 * ivbep_cbox_filter_mask() (IVB-EP adds link/NC/C6/ISOC filter bits).
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1435
1436 static u64 ivbep_cbox_filter_mask(int fields)
1437 {
1438         u64 mask = 0;
1439
1440         if (fields & 0x1)
1441                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1442         if (fields & 0x2)
1443                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1444         if (fields & 0x4)
1445                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1446         if (fields & 0x8)
1447                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1448         if (fields & 0x10) {
1449                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1450                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1451                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1452                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1453         }
1454
1455         return mask;
1456 }
1457
/*
 * Constraint lookup for IVB-EP C-box events: defer to the shared SNB-EP
 * helper, parameterized with the IVB-EP filter-field mask function.
 */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1463
1464 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1465 {
1466         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1467         struct extra_reg *er;
1468         int idx = 0;
1469
1470         for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1471                 if (er->event != (event->hw.config & er->config_mask))
1472                         continue;
1473                 idx |= er->idx;
1474         }
1475
1476         if (idx) {
1477                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1478                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1479                 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1480                 reg1->idx = idx;
1481         }
1482         return 0;
1483 }
1484
/*
 * Enable a C-box event.  If the event uses the shared filter register,
 * program the filter pair first: low 32 bits to the filter MSR itself,
 * high 32 bits to the MSR 6 above it (the second filter register),
 * then set the enable bit in the event's control register.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1498
/* C-box MSR ops: IVB-EP specific init/enable, shared SNB-EP box control. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1510
/*
 * IVB-EP C-box (LLC coherence) PMU: up to 15 boxes, 4 counters each,
 * one shared filter register per box.  num_boxes is trimmed to the
 * actual core count in ivbep_uncore_cpu_init().
 */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1526
/* PCU ops: common IVB-EP MSR ops plus the SNB-EP PCU config/constraint hooks. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
1533
/* IVB-EP power control unit PMU: single box, 4 counters, one shared reg. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
1547
/* NULL-terminated list of IVB-EP MSR-accessed uncore PMU types. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1554
/*
 * Register the IVB-EP MSR-based uncore PMUs.  The C-box count is
 * clamped to the detected core count, since boxes beyond that are
 * not populated on smaller SKUs.
 */
void ivbep_uncore_cpu_init(void)
{
	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = ivbep_msr_uncores;
}
1561
/* IVB-EP Home Agent PMU (PCI-accessed): two agents, 4 counters each. */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1569
/*
 * IVB-EP integrated memory controller PMU: 8 channel boxes, each with
 * 4 generic counters plus a fixed DCLK cycle counter.
 */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1581
/*
 * Registers in IRP boxes are not properly aligned, so the per-counter
 * control and counter PCI config offsets are table-driven rather than
 * derived from a base + stride.
 */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1585
/* Enable an IRP event: write config + enable bit to its control register. */
static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
}
1594
/* Disable an IRP event: rewrite its control register without the enable bit. */
static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
}
1602
1603 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1604 {
1605         struct pci_dev *pdev = box->pci_dev;
1606         struct hw_perf_event *hwc = &event->hw;
1607         u64 count = 0;
1608
1609         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1610         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1611
1612         return count;
1613 }
1614
/* IRP ops: custom event/counter accessors because of the odd register layout. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};
1623
/* IVB-EP IRP (IIO ring port) PMU: single box, 4 counters. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1634
/* QPI ops: shared SNB-EP PCI accessors plus QPI match/mask handling. */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};
1646
/* IVB-EP QPI link-layer PMU: 3 ports, one shared match/mask reg per box. */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1660
/* IVB-EP R2PCIe (ring-to-PCIe) PMU: single box with event constraints. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1669
/* IVB-EP R3QPI (ring-to-QPI) PMU: two boxes, only 3 usable counters each. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1678
/* Indices into ivbep_pci_uncores[], matched by the PCI ID table below. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};
1687
/* NULL-terminated list of IVB-EP PCI-accessed uncore PMU types. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1697
/*
 * IVB-EP uncore PCI device table.  driver_data encodes the uncore type
 * index and box number; the two "filter" entries are extra devices
 * holding the QPI port match/mask registers, not PMU boxes themselves.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1783
/* PCI driver stub: supplies the ID table; probing is done by the uncore core. */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
1788
/*
 * Register the IVB-EP PCI-based uncore PMUs.  Builds the PCI-bus to
 * physical-package map via the ubox device (id 0x0e1e) first; returns
 * its error code on failure, 0 on success.
 */
int ivbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x0e1e);
	if (ret)
		return ret;
	uncore_pci_uncores = ivbep_pci_uncores;
	uncore_pci_driver = &ivbep_uncore_pci_driver;
	return 0;
}
1798 /* end of IvyTown uncore support */
1799
1800 /* KNL uncore support */
/* sysfs format attributes for the KNL ubox (5-bit threshold field). */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
1810
/* sysfs "format" group for the KNL ubox PMU. */
static struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};
1815
/* KNL ubox PMU: reuses the HSW-EP ubox MSR layout, plus a fixed UCLK counter. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
1830
/* sysfs format attributes for the KNL CHA, including its filter fields. */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
1850
/* sysfs "format" group for the KNL CHA PMU. */
static struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};
1855
/* KNL CHA events restricted to counter 0 (events 0x11, 0x1f, 0x36). */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
1862
/*
 * KNL CHA extra registers: map events that need the TID enable or the
 * filter register to the filter-field bitmap consumed by
 * knl_cha_filter_mask() (0x1 = TID, 0x2 = STATE, 0x4 = OP).
 */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
1871
1872 static u64 knl_cha_filter_mask(int fields)
1873 {
1874         u64 mask = 0;
1875
1876         if (fields & 0x1)
1877                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
1878         if (fields & 0x2)
1879                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
1880         if (fields & 0x4)
1881                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
1882         return mask;
1883 }
1884
/*
 * Constraint lookup for KNL CHA events: defer to the shared SNB-EP
 * helper with the KNL-specific filter-field mask function.
 */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
1890
/*
 * Per-event setup for KNL CHA: collect the filter fields of every
 * matching extra register; if any match, attach the per-box filter MSR
 * and mask attr.config1 to the valid filter bits.  The remote/local
 * node and NNC match bits are always set so node filtering does not
 * silently drop all traffic.  Always returns 0.
 */
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}
1916
/* Forward declaration: defined in the HSW-EP section later in this file. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);
1919
/* KNL CHA ops: SNB-EP box control with the HSW-EP enable path and KNL filters. */
static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1931
/* KNL CHA (caching/home agent) PMU: 38 boxes, 4 counters, 1 shared filter. */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
1947
/* sysfs format attributes for the KNL PCU (occupancy-capable event encoding). */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};
1960
/* sysfs "format" group for the KNL PCU PMU. */
static struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};
1965
/* KNL power control unit PMU: single box, HSW-EP register layout. */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};
1978
/* NULL-terminated list of KNL MSR-accessed uncore PMU types. */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};
1985
/* Register the KNL MSR-based uncore PMUs with the uncore core. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
1990
/*
 * Enable a KNL IMC box by writing 0 to its box control register,
 * which clears the freeze bits and lets the counters run.
 */
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
1998
1999 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2000                                         struct perf_event *event)
2001 {
2002         struct pci_dev *pdev = box->pci_dev;
2003         struct hw_perf_event *hwc = &event->hw;
2004
2005         if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2006                                                         == UNCORE_FIXED_EVENT)
2007                 pci_write_config_dword(pdev, hwc->config_base,
2008                                        hwc->config | KNL_PMON_FIXED_CTL_EN);
2009         else
2010                 pci_write_config_dword(pdev, hwc->config_base,
2011                                        hwc->config | SNBEP_PMON_CTL_EN);
2012 }
2013
/* Shared ops for all KNL IMC/EDC clock-domain PMUs. */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};
2022
/* KNL memory controller UCLK PMU: one box per MC, fixed UCLK counter. */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2038
/* KNL memory controller DCLK (channel) PMU: 6 channels across 2 MCs. */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2054
/* KNL EDC (MCDRAM controller) UCLK PMU: 8 boxes, fixed UCLK counter. */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2070
/* KNL EDC ECLK PMU: 8 boxes, fixed ECLK cycle counter. */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2086
/* KNL M2PCIe event 0x23 restricted to counters 0-1. */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
2091
/* KNL M2PCIe PMU: single box, SNB-EP common PCI register layout. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2100
/* sysfs format attributes for the KNL IRP PMU. */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2110
/* sysfs "format" group for the KNL IRP PMU. */
static struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};
2115
/* KNL IRP PMU: single box, 2 counters, standard SNB-EP PCI accessors. */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2128
/* Indices into knl_pci_uncores[], matched by the KNL PCI ID table below. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};
2137
/* NULL-terminated list of KNL PCI-accessed uncore PMU types. */
static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2147
2148 /*
2149  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2150  * device type. prior to KNL, each instance of a PMU device type had a unique
2151  * device ID.
2152  *
2153  *      PCI Device ID   Uncore PMU Devices
2154  *      ----------------------------------
2155  *      0x7841          MC0 UClk, MC1 UClk
2156  *      0x7843          MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2157  *                      MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2158  *      0x7833          EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2159  *                      EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2160  *      0x7835          EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2161  *                      EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2162  *      0x7817          M2PCIe
2163  *      0x7814          IRP
2164 */
2165
/*
 * KNL PCI device table.  Because one device ID is shared by several PMU
 * instances (see table above), each entry also pins a fixed PCI device and
 * function number (first two arguments of UNCORE_PCI_DEV_FULL_DATA) to a
 * PMU type and box index to tell the instances apart.
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2273
/* PCI driver stub for KNL uncore devices; no probe/remove callbacks here. */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2278
2279 int knl_uncore_pci_init(void)
2280 {
2281         int ret;
2282
2283         /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2284         ret = snb_pci2phy_map_init(0x7814); /* IRP */
2285         if (ret)
2286                 return ret;
2287         ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2288         if (ret)
2289                 return ret;
2290         uncore_pci_uncores = knl_pci_uncores;
2291         uncore_pci_driver = &knl_uncore_pci_driver;
2292         return 0;
2293 }
2294
2295 /* end of KNL uncore support */
2296
2297 /* Haswell-EP uncore support */
/* sysfs "format" attributes exposed for the Haswell-EP U-box PMU. */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};
2308
/* Groups the U-box format attributes under the "format" sysfs directory. */
static struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2313
2314 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2315 {
2316         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2317         reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2318         reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2319         reg1->idx = 0;
2320         return 0;
2321 }
2322
/* U-box MSR ops: common SNB-EP MSR handlers plus the U-box filter config. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
2329
/* Haswell-EP U-box: 2 general 44-bit counters plus a 48-bit fixed counter. */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,	/* one shared filter register */
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2345
/* sysfs "format" attributes for the Haswell-EP C-box PMU (incl. filters). */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
2362
/* Groups the C-box format attributes under the "format" sysfs directory. */
static struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
2367
/* C-box events restricted to specific counters (bitmask of allowed ctrs). */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2378
/*
 * Maps C-box event/umask encodings to the filter-field index bits they
 * require; hswep_cbox_hw_config() ORs the matching idx values together
 * and hswep_cbox_filter_mask() translates them into filter-register bits.
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2420
2421 static u64 hswep_cbox_filter_mask(int fields)
2422 {
2423         u64 mask = 0;
2424         if (fields & 0x1)
2425                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2426         if (fields & 0x2)
2427                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2428         if (fields & 0x4)
2429                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2430         if (fields & 0x8)
2431                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2432         if (fields & 0x10) {
2433                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2434                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2435                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2436                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2437         }
2438         return mask;
2439 }
2440
/* Thin wrapper: shared C-box constraint logic with the HSW-EP filter mask. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2446
2447 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2448 {
2449         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2450         struct extra_reg *er;
2451         int idx = 0;
2452
2453         for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2454                 if (er->event != (event->hw.config & er->config_mask))
2455                         continue;
2456                 idx |= er->idx;
2457         }
2458
2459         if (idx) {
2460                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2461                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2462                 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2463                 reg1->idx = idx;
2464         }
2465         return 0;
2466 }
2467
2468 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2469                                   struct perf_event *event)
2470 {
2471         struct hw_perf_event *hwc = &event->hw;
2472         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2473
2474         if (reg1->idx != EXTRA_REG_NONE) {
2475                 u64 filter = uncore_shared_reg_config(box, 0);
2476                 wrmsrl(reg1->reg, filter & 0xffffffff);
2477                 wrmsrl(reg1->reg + 1, filter >> 32);
2478         }
2479
2480         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2481 }
2482
/* C-box MSR ops: SNB-EP box handlers with HSW-EP filter-aware event setup. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2494
/*
 * Haswell-EP C-box: up to 18 boxes (trimmed to the core count in
 * hswep_uncore_cpu_init()), 4 counters of 48 bits each per box.
 */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,	/* the per-box filter register */
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2510
/*
 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		/*
		 * Accumulate the init value one set bit at a time,
		 * rewriting the box control MSR after each addition
		 * rather than writing the full value at once.
		 */
		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}
2529
/* S-box MSR ops: common handlers but with the bit-by-bit init workaround. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
2534
/* sysfs "format" attributes exposed for the Haswell-EP S-box PMU. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2544
/* Groups the S-box format attributes under the "format" sysfs directory. */
static struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
2549
/*
 * Haswell-EP S-box: 4 boxes by default; reduced to 2 on 6-8 core parts
 * by the capability check in hswep_uncore_cpu_init().
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2563
2564 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2565 {
2566         struct hw_perf_event *hwc = &event->hw;
2567         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2568         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2569
2570         if (ev_sel >= 0xb && ev_sel <= 0xe) {
2571                 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2572                 reg1->idx = ev_sel - 0xb;
2573                 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2574         }
2575         return 0;
2576 }
2577
/* PCU MSR ops: common handlers plus band-filter config and constraints. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
2584
/* Haswell-EP PCU (power control unit) PMU: one box, 4 x 48-bit counters. */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,	/* the band filter register */
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2598
/* MSR-based Haswell-EP uncore PMUs, registered by hswep_uncore_cpu_init(). */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2606
/*
 * Register the MSR-based Haswell-EP uncore PMUs, trimming the box
 * counts to match the actual hardware configuration first.
 */
void hswep_uncore_cpu_init(void)
{
	int pkg = topology_phys_to_logical_pkg(0);

	/* Never expose more C-boxes than there are cores. */
	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		u32 capid4;

		/* Capability register at config offset 0x94 of PCU.3. */
		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
				      0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}
2626
/* Haswell-EP Home Agent PMU: 2 boxes, 5 x 48-bit counters, PCI-based. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 5,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2634
/*
 * Predefined IMC events.  CAS counts are scaled by 64/2^20
 * (6.103515625e-5) so one 64-byte cache-line access reports as MiB.
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2645
/* Haswell-EP IMC PMU: 8 channels, each with a fixed (DCLK) counter. */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2657
/* PCI config-space offsets of the four IRP counters, indexed by hwc->idx. */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2659
2660 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2661 {
2662         struct pci_dev *pdev = box->pci_dev;
2663         struct hw_perf_event *hwc = &event->hw;
2664         u64 count = 0;
2665
2666         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2667         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2668
2669         return count;
2670 }
2671
/* IRP PCI ops: SNB-EP box control, IVB-EP event control, HSW-EP reads. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2680
/*
 * Haswell-EP IRP PMU.  No .perf_ctr/.event_ctl here: counter access
 * goes through the scattered config-space offsets in the IRP ops.
 */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2691
/* Haswell-EP QPI PMU: 3 link boxes, uses the SNB-EP QPI ops and formats. */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 5,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,	/* QPI match/mask registers */
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2705
/* R2PCIe events restricted to specific counters (bitmask of allowed ctrs). */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};
2727
/* Haswell-EP R2PCIe ring-to-PCIe PMU: one box, 4 x 48-bit counters. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2736
/* R3QPI events restricted to specific counters (bitmask of allowed ctrs). */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
2773
/* Haswell-EP R3QPI ring-to-QPI PMU: 3 boxes, 4 x 44-bit counters. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2782
/* Indices into hswep_pci_uncores[] for the PCI-based HSW-EP PMU types. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
2791
/* PCI-based Haswell-EP uncore PMUs, indexed by the HSWEP_PCI_UNCORE_* enum. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
2801
/*
 * Haswell-EP PCI device table.  The last three entries are not PMON
 * units themselves but extra devices (QPI port filters, PCU.3) whose
 * config space other parts of the driver read.
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
2892
/* PCI driver stub for HSW-EP uncore devices; no probe/remove callbacks. */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
2897
2898 int hswep_uncore_pci_init(void)
2899 {
2900         int ret = snbep_pci2phy_map_init(0x2f1e);
2901         if (ret)
2902                 return ret;
2903         uncore_pci_uncores = hswep_pci_uncores;
2904         uncore_pci_driver = &hswep_uncore_pci_driver;
2905         return 0;
2906 }
2907 /* end of Haswell-EP uncore support */
2908
2909 /* BDX uncore support */
2910
/*
 * BDX Ubox (system configuration controller) PMU: two generic 48-bit
 * counters plus one fixed 48-bit counter. Register addresses reuse the
 * Haswell-EP MSR layout (HSWEP_U_MSR_*), and the MSR access ops and
 * format group are shared with IvyBridge-EP.
 */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
2926
/*
 * BDX Cbox events restricted to a subset of the generic counters.
 * First argument is the event code, second is the bitmask of counters
 * the event may be scheduled on.
 */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2934
/*
 * BDX Cbox (LLC coherence engine) PMU, one box per core slice.
 * num_boxes is the architectural maximum (24); bdx_uncore_cpu_init()
 * trims it down to the actual core count at boot. Register layout and
 * ops are shared with Haswell-EP.
 */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2950
/* NULL-terminated list of the MSR-based uncore PMU types on BDX. */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,	/* PCU layout is identical to Haswell-EP */
	NULL,
};
2957
2958 void bdx_uncore_cpu_init(void)
2959 {
2960         if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2961                 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2962         uncore_msr_uncores = bdx_msr_uncores;
2963 }
2964
/*
 * BDX Home Agent PMU (PCI based), two boxes. Uses the common SNB-EP
 * PCI counter/control layout via SNBEP_UNCORE_PCI_COMMON_INIT().
 */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2972
/*
 * BDX integrated memory controller PMU (PCI based), up to 8 channels.
 * In addition to the common SNB-EP PCI layout it has a fixed 48-bit
 * counter (presumably the DRAM clock counter, as on HSW-EP — the event
 * descriptions are reused from hswep_uncore_imc_events).
 */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2984
/*
 * BDX IRP (IIO ring port) PMU. No perf_ctr/event_ctl are set here;
 * register access goes through the custom hswep_uncore_irp_ops —
 * NOTE(review): presumably because the IRP counter registers are not
 * at the standard SNB-EP offsets; confirm against the ops definition.
 */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2995
/*
 * BDX QPI link layer PMU (PCI based), three ports. Reuses the SNB-EP
 * QPI ops and format group; num_shared_regs covers the QPI match/mask
 * filter registers those ops program.
 */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
3009
/*
 * BDX R2PCIe events restricted to a subset of the generic counters
 * (event code, usable-counter bitmask).
 */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
3022
/*
 * BDX R2PCIe (ring-to-PCIe interface) PMU, common SNB-EP PCI layout.
 */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3031
/*
 * BDX R3QPI events restricted to a subset of the three generic counters
 * (event code, usable-counter bitmask).
 */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
3065
/*
 * BDX R3QPI (ring-to-QPI interface) PMU, three links, common SNB-EP
 * PCI layout.
 */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3074
/* Indices into bdx_pci_uncores[], matched by bdx_uncore_pci_ids[]. */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};
3083
/* NULL-terminated list of the PCI-based uncore PMU types on BDX. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3093
/*
 * PCI device IDs of the BDX uncore PMU devices. driver_data packs the
 * uncore type index (from the BDX_PCI_UNCORE_* enum, or
 * UNCORE_EXTRA_PCI_DEV for non-PMU helper devices) together with the
 * box/instance number via UNCORE_PCI_DEV_DATA().
 *
 * NOTE(review): the three QPI port filter entries use raw indices
 * 0/1/2 where the HSW-EP table uses the named SNBEP_PCI_QPI_PORT*_FILTER
 * constants; these must stay in sync with the slots snbep_uncore_qpi_ops
 * reads from uncore_extra_pci_dev — confirm, and consider named
 * constants here too.
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
	},
	{ /* QPI Port 2 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
	},
	{ /* end: all zeroes */ }
};
3181
/*
 * BDX uncore PCI driver. Only a name and an id_table are set; no probe
 * callback is provided here — the generic uncore core uses this table
 * to discover the PMU PCI devices.
 */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3186
3187 int bdx_uncore_pci_init(void)
3188 {
3189         int ret = snbep_pci2phy_map_init(0x6f1e);
3190
3191         if (ret)
3192                 return ret;
3193         uncore_pci_uncores = bdx_pci_uncores;
3194         uncore_pci_driver = &bdx_uncore_pci_driver;
3195         return 0;
3196 }
3197
3198 /* end of BDX uncore support */