1 #include "perf_event_intel_uncore.h"
2
3 static struct intel_uncore_type *empty_uncore[] = { NULL, };
4 static struct intel_uncore_type **msr_uncores = empty_uncore;
5 static struct intel_uncore_type **pci_uncores = empty_uncore;
6 /* pci bus to socket mapping */
7 static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
8
9 static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
10
11 static DEFINE_RAW_SPINLOCK(uncore_box_lock);
12
13 /* mask of cpus that collect uncore events */
14 static cpumask_t uncore_cpu_mask;
15
16 /* constraint for the fixed counter */
17 static struct event_constraint constraint_fixed =
18         EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
19 static struct event_constraint constraint_empty =
20         EVENT_CONSTRAINT(0, 0, 0);
21
22 #define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
23                                 ((1ULL << (n)) - 1)))
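/*
 * Helper to extract field 'i' of width 'n' bits from a packed value.
 * Illustrative use from the constraint code below:
 *   __BITS_VALUE(atomic_read(&er->ref), 2, 6) returns bits 17:12 of the
 * shared-register reference word, i.e. the use count of filter field 2
 * when the per-field counts are packed six bits apart.
 */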
24
25 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
26 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
27 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
28 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
29 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
30 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
31 DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
32 DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
33 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
34 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
35 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
36 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
37 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
38 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
39 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
40 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
41 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
42 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
43 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
44 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
45 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
46 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
47 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
48 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
49 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
50 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
51 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
52 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
53 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
54 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
55 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
56 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
57 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
58 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
59 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
60 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
61 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
62 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
63 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
64 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
65 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
66 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
67 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
68
69 static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
70 {
71         u64 count;
72
73         rdmsrl(event->hw.event_base, count);
74
75         return count;
76 }
77
78 /*
79  * generic get constraint function for shared match/mask registers.
80  */
81 static struct event_constraint *
82 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
83 {
84         struct intel_uncore_extra_reg *er;
85         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
86         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
87         unsigned long flags;
88         bool ok = false;
89
90         /*
91          * reg->alloc can be set due to existing state, so for a fake box we
92          * need to ignore this, otherwise we might fail to allocate proper
93          * fake state for this extra reg constraint.
94          */
95         if (reg1->idx == EXTRA_REG_NONE ||
96             (!uncore_box_is_fake(box) && reg1->alloc))
97                 return NULL;
98
99         er = &box->shared_regs[reg1->idx];
100         raw_spin_lock_irqsave(&er->lock, flags);
101         if (!atomic_read(&er->ref) ||
102             (er->config1 == reg1->config && er->config2 == reg2->config)) {
103                 atomic_inc(&er->ref);
104                 er->config1 = reg1->config;
105                 er->config2 = reg2->config;
106                 ok = true;
107         }
108         raw_spin_unlock_irqrestore(&er->lock, flags);
109
110         if (ok) {
111                 if (!uncore_box_is_fake(box))
112                         reg1->alloc = 1;
113                 return NULL;
114         }
115
116         return &constraint_empty;
117 }
118
119 static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
120 {
121         struct intel_uncore_extra_reg *er;
122         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
123
124         /*
125          * Only put constraint if extra reg was actually allocated. Also
126          * takes care of events which do not use an extra shared reg.
127          *
128          * Also, if this is a fake box we shouldn't touch any event state
129          * (reg->alloc) and we don't care about leaving inconsistent box
130          * state either since it will be thrown out.
131          */
132         if (uncore_box_is_fake(box) || !reg1->alloc)
133                 return;
134
135         er = &box->shared_regs[reg1->idx];
136         atomic_dec(&er->ref);
137         reg1->alloc = 0;
138 }
139
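/*
 * Snapshot the current value of a shared (filter) register under its
 * lock, so the enable_event callbacks can write out a consistent 64-bit
 * filter configuration.
 */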
140 static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
141 {
142         struct intel_uncore_extra_reg *er;
143         unsigned long flags;
144         u64 config;
145
146         er = &box->shared_regs[idx];
147
148         raw_spin_lock_irqsave(&er->lock, flags);
149         config = er->config;
150         raw_spin_unlock_irqrestore(&er->lock, flags);
151
152         return config;
153 }
154
155 /* Sandy Bridge-EP uncore support */
156 static struct intel_uncore_type snbep_uncore_cbox;
157 static struct intel_uncore_type snbep_uncore_pcu;
158
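/*
 * Box-level disable/enable is implemented with the freeze bit in the
 * per-box control register: disable_box sets SNBEP_PMON_BOX_CTL_FRZ to
 * freeze every counter in the box, and enable_box clears it again.  The
 * MSR based variants further down do the same via rdmsrl/wrmsrl.
 */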
159 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
160 {
161         struct pci_dev *pdev = box->pci_dev;
162         int box_ctl = uncore_pci_box_ctl(box);
163         u32 config = 0;
164
165         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
166                 config |= SNBEP_PMON_BOX_CTL_FRZ;
167                 pci_write_config_dword(pdev, box_ctl, config);
168         }
169 }
170
171 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
172 {
173         struct pci_dev *pdev = box->pci_dev;
174         int box_ctl = uncore_pci_box_ctl(box);
175         u32 config = 0;
176
177         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
178                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
179                 pci_write_config_dword(pdev, box_ctl, config);
180         }
181 }
182
183 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
184 {
185         struct pci_dev *pdev = box->pci_dev;
186         struct hw_perf_event *hwc = &event->hw;
187
188         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
189 }
190
191 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
192 {
193         struct pci_dev *pdev = box->pci_dev;
194         struct hw_perf_event *hwc = &event->hw;
195
196         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
197 }
198
199 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
200 {
201         struct pci_dev *pdev = box->pci_dev;
202         struct hw_perf_event *hwc = &event->hw;
203         u64 count = 0;
204
205         pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
206         pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
207
208         return count;
209 }
210
211 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
212 {
213         struct pci_dev *pdev = box->pci_dev;
214
215         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
216 }
217
218 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
219 {
220         u64 config;
221         unsigned msr;
222
223         msr = uncore_msr_box_ctl(box);
224         if (msr) {
225                 rdmsrl(msr, config);
226                 config |= SNBEP_PMON_BOX_CTL_FRZ;
227                 wrmsrl(msr, config);
228         }
229 }
230
231 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
232 {
233         u64 config;
234         unsigned msr;
235
236         msr = uncore_msr_box_ctl(box);
237         if (msr) {
238                 rdmsrl(msr, config);
239                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
240                 wrmsrl(msr, config);
241         }
242 }
243
244 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
245 {
246         struct hw_perf_event *hwc = &event->hw;
247         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
248
249         if (reg1->idx != EXTRA_REG_NONE)
250                 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
251
252         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
253 }
254
255 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
256                                         struct perf_event *event)
257 {
258         struct hw_perf_event *hwc = &event->hw;
259
260         wrmsrl(hwc->config_base, hwc->config);
261 }
262
263 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
264 {
265         unsigned msr = uncore_msr_box_ctl(box);
266
267         if (msr)
268                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
269 }
270
271 static struct attribute *snbep_uncore_formats_attr[] = {
272         &format_attr_event.attr,
273         &format_attr_umask.attr,
274         &format_attr_edge.attr,
275         &format_attr_inv.attr,
276         &format_attr_thresh8.attr,
277         NULL,
278 };
279
280 static struct attribute *snbep_uncore_ubox_formats_attr[] = {
281         &format_attr_event.attr,
282         &format_attr_umask.attr,
283         &format_attr_edge.attr,
284         &format_attr_inv.attr,
285         &format_attr_thresh5.attr,
286         NULL,
287 };
288
289 static struct attribute *snbep_uncore_cbox_formats_attr[] = {
290         &format_attr_event.attr,
291         &format_attr_umask.attr,
292         &format_attr_edge.attr,
293         &format_attr_tid_en.attr,
294         &format_attr_inv.attr,
295         &format_attr_thresh8.attr,
296         &format_attr_filter_tid.attr,
297         &format_attr_filter_nid.attr,
298         &format_attr_filter_state.attr,
299         &format_attr_filter_opc.attr,
300         NULL,
301 };
302
303 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
304         &format_attr_event_ext.attr,
305         &format_attr_occ_sel.attr,
306         &format_attr_edge.attr,
307         &format_attr_inv.attr,
308         &format_attr_thresh5.attr,
309         &format_attr_occ_invert.attr,
310         &format_attr_occ_edge.attr,
311         &format_attr_filter_band0.attr,
312         &format_attr_filter_band1.attr,
313         &format_attr_filter_band2.attr,
314         &format_attr_filter_band3.attr,
315         NULL,
316 };
317
318 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
319         &format_attr_event_ext.attr,
320         &format_attr_umask.attr,
321         &format_attr_edge.attr,
322         &format_attr_inv.attr,
323         &format_attr_thresh8.attr,
324         &format_attr_match_rds.attr,
325         &format_attr_match_rnid30.attr,
326         &format_attr_match_rnid4.attr,
327         &format_attr_match_dnid.attr,
328         &format_attr_match_mc.attr,
329         &format_attr_match_opc.attr,
330         &format_attr_match_vnw.attr,
331         &format_attr_match0.attr,
332         &format_attr_match1.attr,
333         &format_attr_mask_rds.attr,
334         &format_attr_mask_rnid30.attr,
335         &format_attr_mask_rnid4.attr,
336         &format_attr_mask_dnid.attr,
337         &format_attr_mask_mc.attr,
338         &format_attr_mask_opc.attr,
339         &format_attr_mask_vnw.attr,
340         &format_attr_mask0.attr,
341         &format_attr_mask1.attr,
342         NULL,
343 };
344
345 static struct uncore_event_desc snbep_uncore_imc_events[] = {
346         INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
347         INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
348         INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
349         { /* end: all zeroes */ },
350 };
351
352 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
353         INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
354         INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
355         INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
356         INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
357         { /* end: all zeroes */ },
358 };
359
360 static struct attribute_group snbep_uncore_format_group = {
361         .name = "format",
362         .attrs = snbep_uncore_formats_attr,
363 };
364
365 static struct attribute_group snbep_uncore_ubox_format_group = {
366         .name = "format",
367         .attrs = snbep_uncore_ubox_formats_attr,
368 };
369
370 static struct attribute_group snbep_uncore_cbox_format_group = {
371         .name = "format",
372         .attrs = snbep_uncore_cbox_formats_attr,
373 };
374
375 static struct attribute_group snbep_uncore_pcu_format_group = {
376         .name = "format",
377         .attrs = snbep_uncore_pcu_formats_attr,
378 };
379
380 static struct attribute_group snbep_uncore_qpi_format_group = {
381         .name = "format",
382         .attrs = snbep_uncore_qpi_formats_attr,
383 };
384
385 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
386         .init_box       = snbep_uncore_msr_init_box,            \
387         .disable_box    = snbep_uncore_msr_disable_box,         \
388         .enable_box     = snbep_uncore_msr_enable_box,          \
389         .disable_event  = snbep_uncore_msr_disable_event,       \
390         .enable_event   = snbep_uncore_msr_enable_event,        \
391         .read_counter   = uncore_msr_read_counter
392
393 static struct intel_uncore_ops snbep_uncore_msr_ops = {
394         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
395 };
396
397 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
398         .init_box       = snbep_uncore_pci_init_box,            \
399         .disable_box    = snbep_uncore_pci_disable_box,         \
400         .enable_box     = snbep_uncore_pci_enable_box,          \
401         .disable_event  = snbep_uncore_pci_disable_event,       \
402         .read_counter   = snbep_uncore_pci_read_counter
403
404 static struct intel_uncore_ops snbep_uncore_pci_ops = {
405         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
406         .enable_event   = snbep_uncore_pci_enable_event,
407 };
408
409 static struct event_constraint snbep_uncore_cbox_constraints[] = {
410         UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
411         UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
412         UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
413         UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
414         UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
415         UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
416         UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
417         UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
418         UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
419         UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
420         UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
421         UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
422         UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
423         EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
424         UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
425         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
426         UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
427         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
428         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
429         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
430         UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
431         UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
432         UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
433         UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
434         UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
435         UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
436         EVENT_CONSTRAINT_END
437 };
438
439 static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
440         UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
441         UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
442         UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
443         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
444         UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
445         UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
446         UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
447         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
448         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
449         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
450         EVENT_CONSTRAINT_END
451 };
452
453 static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
454         UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
455         UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
456         UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
457         UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
458         UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
459         UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
460         UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
461         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
462         UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
463         UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
464         UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
465         UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
466         UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
467         UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
468         UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
469         UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
470         UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
471         UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
472         UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
473         UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
474         UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
475         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
476         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
477         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
478         UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
479         UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
480         UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
481         UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
482         EVENT_CONSTRAINT_END
483 };
484
485 static struct intel_uncore_type snbep_uncore_ubox = {
486         .name           = "ubox",
487         .num_counters   = 2,
488         .num_boxes      = 1,
489         .perf_ctr_bits  = 44,
490         .fixed_ctr_bits = 48,
491         .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
492         .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
493         .event_mask     = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
494         .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
495         .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
496         .ops            = &snbep_uncore_msr_ops,
497         .format_group   = &snbep_uncore_ubox_format_group,
498 };
499
500 static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
501         SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
502                                   SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
503         SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
504         SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
505         SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
506         SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
507         SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
508         SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
509         SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
510         SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
511         SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
512         SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
513         SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
514         SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
515         SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
516         SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
517         SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
518         SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
519         SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
520         SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
521         SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
522         SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
523         SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
524         SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
525         SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
526         EVENT_EXTRA_END
527 };
528
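/*
 * The C-box filter register is shared between events: each filter field
 * (tid/nid/state/opc on SNB-EP, plus link on IvyTown) has its users
 * tracked as a packed 6-bit counter in er->ref, and the fields an event
 * has claimed are recorded as a bitmask in reg1->alloc.  put_constraint
 * simply drops the references that get_constraint took below.
 */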
529 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
530 {
531         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
532         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
533         int i;
534
535         if (uncore_box_is_fake(box))
536                 return;
537
538         for (i = 0; i < 5; i++) {
539                 if (reg1->alloc & (0x1 << i))
540                         atomic_sub(1 << (i * 6), &er->ref);
541         }
542         reg1->alloc = 0;
543 }
544
545 static struct event_constraint *
546 __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
547                             u64 (*cbox_filter_mask)(int fields))
548 {
549         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
550         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
551         int i, alloc = 0;
552         unsigned long flags;
553         u64 mask;
554
555         if (reg1->idx == EXTRA_REG_NONE)
556                 return NULL;
557
558         raw_spin_lock_irqsave(&er->lock, flags);
559         for (i = 0; i < 5; i++) {
560                 if (!(reg1->idx & (0x1 << i)))
561                         continue;
562                 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
563                         continue;
564
565                 mask = cbox_filter_mask(0x1 << i);
566                 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
567                     !((reg1->config ^ er->config) & mask)) {
568                         atomic_add(1 << (i * 6), &er->ref);
569                         er->config &= ~mask;
570                         er->config |= reg1->config & mask;
571                         alloc |= (0x1 << i);
572                 } else {
573                         break;
574                 }
575         }
576         raw_spin_unlock_irqrestore(&er->lock, flags);
577         if (i < 5)
578                 goto fail;
579
580         if (!uncore_box_is_fake(box))
581                 reg1->alloc |= alloc;
582
583         return NULL;
584 fail:
585         for (; i >= 0; i--) {
586                 if (alloc & (0x1 << i))
587                         atomic_sub(1 << (i * 6), &er->ref);
588         }
589         return &constraint_empty;
590 }
591
592 static u64 snbep_cbox_filter_mask(int fields)
593 {
594         u64 mask = 0;
595
596         if (fields & 0x1)
597                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
598         if (fields & 0x2)
599                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
600         if (fields & 0x4)
601                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
602         if (fields & 0x8)
603                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
604
605         return mask;
606 }
607
608 static struct event_constraint *
609 snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
610 {
611         return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
612 }
613
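/*
 * Work out which filter fields an event needs by matching its event/umask
 * against the extra_regs table, then point reg1 at this box's filter MSR
 * (base + pmu_idx * SNBEP_CBO_MSR_OFFSET), keeping only the relevant
 * filter bits of attr->config1.
 */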
614 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
615 {
616         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
617         struct extra_reg *er;
618         int idx = 0;
619
620         for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
621                 if (er->event != (event->hw.config & er->config_mask))
622                         continue;
623                 idx |= er->idx;
624         }
625
626         if (idx) {
627                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
628                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
629                 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
630                 reg1->idx = idx;
631         }
632         return 0;
633 }
634
635 static struct intel_uncore_ops snbep_uncore_cbox_ops = {
636         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
637         .hw_config              = snbep_cbox_hw_config,
638         .get_constraint         = snbep_cbox_get_constraint,
639         .put_constraint         = snbep_cbox_put_constraint,
640 };
641
642 static struct intel_uncore_type snbep_uncore_cbox = {
643         .name                   = "cbox",
644         .num_counters           = 4,
645         .num_boxes              = 8,
646         .perf_ctr_bits          = 44,
647         .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
648         .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
649         .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
650         .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
651         .msr_offset             = SNBEP_CBO_MSR_OFFSET,
652         .num_shared_regs        = 1,
653         .constraints            = snbep_uncore_cbox_constraints,
654         .ops                    = &snbep_uncore_cbox_ops,
655         .format_group           = &snbep_uncore_cbox_format_group,
656 };
657
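/*
 * The PCU filter register holds four 8-bit occupancy bands (filter_band0-3
 * in the format group).  alter_er() shifts an event's band value from its
 * current band index to new_idx; get_constraint() below uses this to retry
 * the other bands when the requested one is already taken with a different
 * value.
 */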
658 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
659 {
660         struct hw_perf_event *hwc = &event->hw;
661         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
662         u64 config = reg1->config;
663
664         if (new_idx > reg1->idx)
665                 config <<= 8 * (new_idx - reg1->idx);
666         else
667                 config >>= 8 * (reg1->idx - new_idx);
668
669         if (modify) {
670                 hwc->config += new_idx - reg1->idx;
671                 reg1->config = config;
672                 reg1->idx = new_idx;
673         }
674         return config;
675 }
676
677 static struct event_constraint *
678 snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
679 {
680         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
681         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
682         unsigned long flags;
683         int idx = reg1->idx;
684         u64 mask, config1 = reg1->config;
685         bool ok = false;
686
687         if (reg1->idx == EXTRA_REG_NONE ||
688             (!uncore_box_is_fake(box) && reg1->alloc))
689                 return NULL;
690 again:
691         mask = 0xffULL << (idx * 8);
692         raw_spin_lock_irqsave(&er->lock, flags);
693         if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
694             !((config1 ^ er->config) & mask)) {
695                 atomic_add(1 << (idx * 8), &er->ref);
696                 er->config &= ~mask;
697                 er->config |= config1 & mask;
698                 ok = true;
699         }
700         raw_spin_unlock_irqrestore(&er->lock, flags);
701
702         if (!ok) {
703                 idx = (idx + 1) % 4;
704                 if (idx != reg1->idx) {
705                         config1 = snbep_pcu_alter_er(event, idx, false);
706                         goto again;
707                 }
708                 return &constraint_empty;
709         }
710
711         if (!uncore_box_is_fake(box)) {
712                 if (idx != reg1->idx)
713                         snbep_pcu_alter_er(event, idx, true);
714                 reg1->alloc = 1;
715         }
716         return NULL;
717 }
718
719 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
720 {
721         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
722         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
723
724         if (uncore_box_is_fake(box) || !reg1->alloc)
725                 return;
726
727         atomic_sub(1 << (reg1->idx * 8), &er->ref);
728         reg1->alloc = 0;
729 }
730
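/*
 * Event selects 0xb-0xe use the PCU filter: map them to filter bands 0-3
 * and keep only that band's byte of attr->config1.
 */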
731 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
732 {
733         struct hw_perf_event *hwc = &event->hw;
734         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
735         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
736
737         if (ev_sel >= 0xb && ev_sel <= 0xe) {
738                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
739                 reg1->idx = ev_sel - 0xb;
740                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
741         }
742         return 0;
743 }
744
745 static struct intel_uncore_ops snbep_uncore_pcu_ops = {
746         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
747         .hw_config              = snbep_pcu_hw_config,
748         .get_constraint         = snbep_pcu_get_constraint,
749         .put_constraint         = snbep_pcu_put_constraint,
750 };
751
752 static struct intel_uncore_type snbep_uncore_pcu = {
753         .name                   = "pcu",
754         .num_counters           = 4,
755         .num_boxes              = 1,
756         .perf_ctr_bits          = 48,
757         .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
758         .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
759         .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
760         .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
761         .num_shared_regs        = 1,
762         .ops                    = &snbep_uncore_pcu_ops,
763         .format_group           = &snbep_uncore_pcu_format_group,
764 };
765
766 static struct intel_uncore_type *snbep_msr_uncores[] = {
767         &snbep_uncore_ubox,
768         &snbep_uncore_cbox,
769         &snbep_uncore_pcu,
770         NULL,
771 };
772
773 enum {
774         SNBEP_PCI_QPI_PORT0_FILTER,
775         SNBEP_PCI_QPI_PORT1_FILTER,
776 };
777
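/*
 * Event 0x38 uses the QPI packet match/mask register pair, so stash the
 * 64-bit match value (attr->config1) in the extra_reg and the 64-bit mask
 * value (attr->config2) in the branch_reg for enable_event to program.
 */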
778 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
779 {
780         struct hw_perf_event *hwc = &event->hw;
781         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
782         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
783
784         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
785                 reg1->idx = 0;
786                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
787                 reg1->config = event->attr.config1;
788                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
789                 reg2->config = event->attr.config2;
790         }
791         return 0;
792 }
793
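/*
 * The QPI match/mask registers live in a separate "filter" PCI function
 * (see the 0x3c86/0x3c96 entries in the device table), so look up the
 * companion device for this port in extra_pci_dev[] and program it before
 * enabling the counter itself.
 */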
794 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
795 {
796         struct pci_dev *pdev = box->pci_dev;
797         struct hw_perf_event *hwc = &event->hw;
798         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
799         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
800
801         if (reg1->idx != EXTRA_REG_NONE) {
802                 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
803                 struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
804                 WARN_ON_ONCE(!filter_pdev);
805                 if (filter_pdev) {
806                         pci_write_config_dword(filter_pdev, reg1->reg,
807                                                 (u32)reg1->config);
808                         pci_write_config_dword(filter_pdev, reg1->reg + 4,
809                                                 (u32)(reg1->config >> 32));
810                         pci_write_config_dword(filter_pdev, reg2->reg,
811                                                 (u32)reg2->config);
812                         pci_write_config_dword(filter_pdev, reg2->reg + 4,
813                                                 (u32)(reg2->config >> 32));
814                 }
815         }
816
817         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
818 }
819
820 static struct intel_uncore_ops snbep_uncore_qpi_ops = {
821         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
822         .enable_event           = snbep_qpi_enable_event,
823         .hw_config              = snbep_qpi_hw_config,
824         .get_constraint         = uncore_get_constraint,
825         .put_constraint         = uncore_put_constraint,
826 };
827
828 #define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
829         .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
830         .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
831         .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,            \
832         .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
833         .ops            = &snbep_uncore_pci_ops,                \
834         .format_group   = &snbep_uncore_format_group
835
836 static struct intel_uncore_type snbep_uncore_ha = {
837         .name           = "ha",
838         .num_counters   = 4,
839         .num_boxes      = 1,
840         .perf_ctr_bits  = 48,
841         SNBEP_UNCORE_PCI_COMMON_INIT(),
842 };
843
844 static struct intel_uncore_type snbep_uncore_imc = {
845         .name           = "imc",
846         .num_counters   = 4,
847         .num_boxes      = 4,
848         .perf_ctr_bits  = 48,
849         .fixed_ctr_bits = 48,
850         .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
851         .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
852         .event_descs    = snbep_uncore_imc_events,
853         SNBEP_UNCORE_PCI_COMMON_INIT(),
854 };
855
856 static struct intel_uncore_type snbep_uncore_qpi = {
857         .name                   = "qpi",
858         .num_counters           = 4,
859         .num_boxes              = 2,
860         .perf_ctr_bits          = 48,
861         .perf_ctr               = SNBEP_PCI_PMON_CTR0,
862         .event_ctl              = SNBEP_PCI_PMON_CTL0,
863         .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
864         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
865         .num_shared_regs        = 1,
866         .ops                    = &snbep_uncore_qpi_ops,
867         .event_descs            = snbep_uncore_qpi_events,
868         .format_group           = &snbep_uncore_qpi_format_group,
869 };
870
871
872 static struct intel_uncore_type snbep_uncore_r2pcie = {
873         .name           = "r2pcie",
874         .num_counters   = 4,
875         .num_boxes      = 1,
876         .perf_ctr_bits  = 44,
877         .constraints    = snbep_uncore_r2pcie_constraints,
878         SNBEP_UNCORE_PCI_COMMON_INIT(),
879 };
880
881 static struct intel_uncore_type snbep_uncore_r3qpi = {
882         .name           = "r3qpi",
883         .num_counters   = 3,
884         .num_boxes      = 2,
885         .perf_ctr_bits  = 44,
886         .constraints    = snbep_uncore_r3qpi_constraints,
887         SNBEP_UNCORE_PCI_COMMON_INIT(),
888 };
889
890 enum {
891         SNBEP_PCI_UNCORE_HA,
892         SNBEP_PCI_UNCORE_IMC,
893         SNBEP_PCI_UNCORE_QPI,
894         SNBEP_PCI_UNCORE_R2PCIE,
895         SNBEP_PCI_UNCORE_R3QPI,
896 };
897
898 static struct intel_uncore_type *snbep_pci_uncores[] = {
899         [SNBEP_PCI_UNCORE_HA]           = &snbep_uncore_ha,
900         [SNBEP_PCI_UNCORE_IMC]          = &snbep_uncore_imc,
901         [SNBEP_PCI_UNCORE_QPI]          = &snbep_uncore_qpi,
902         [SNBEP_PCI_UNCORE_R2PCIE]       = &snbep_uncore_r2pcie,
903         [SNBEP_PCI_UNCORE_R3QPI]        = &snbep_uncore_r3qpi,
904         NULL,
905 };
906
907 static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
908         { /* Home Agent */
909                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
910                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
911         },
912         { /* MC Channel 0 */
913                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
914                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
915         },
916         { /* MC Channel 1 */
917                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
918                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
919         },
920         { /* MC Channel 2 */
921                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
922                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
923         },
924         { /* MC Channel 3 */
925                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
926                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
927         },
928         { /* QPI Port 0 */
929                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
930                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
931         },
932         { /* QPI Port 1 */
933                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
934                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
935         },
936         { /* R2PCIe */
937                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
938                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
939         },
940         { /* R3QPI Link 0 */
941                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
942                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
943         },
944         { /* R3QPI Link 1 */
945                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
946                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
947         },
948         { /* QPI Port 0 filter  */
949                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
950                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
951                                                    SNBEP_PCI_QPI_PORT0_FILTER),
952         },
953         { /* QPI Port 1 filter  */
954                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
955                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
956                                                    SNBEP_PCI_QPI_PORT1_FILTER),
957         },
958         { /* end: all zeroes */ }
959 };
960
961 static struct pci_driver snbep_uncore_pci_driver = {
962         .name           = "snbep_uncore",
963         .id_table       = snbep_uncore_pci_ids,
964 };
965
966 /*
967  * build pci bus to socket mapping
968  */
969 static int snbep_pci2phy_map_init(int devid)
970 {
971         struct pci_dev *ubox_dev = NULL;
972         int i, bus, nodeid;
973         int err = 0;
974         u32 config = 0;
975
976         while (1) {
977                 /* find the UBOX device */
978                 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
979                 if (!ubox_dev)
980                         break;
981                 bus = ubox_dev->bus->number;
982                 /* get the Node ID of the local register */
983                 err = pci_read_config_dword(ubox_dev, 0x40, &config);
984                 if (err)
985                         break;
986                 nodeid = config;
987                 /* get the Node ID mapping */
988                 err = pci_read_config_dword(ubox_dev, 0x54, &config);
989                 if (err)
990                         break;
991                 /*
992                  * every three bits in the Node ID mapping register maps
993                  * to a particular node.
994                  */
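                /* the index of the matching 3-bit field gives the physical package id */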
995                 for (i = 0; i < 8; i++) {
996                         if (nodeid == ((config >> (3 * i)) & 0x7)) {
997                                 pcibus_to_physid[bus] = i;
998                                 break;
999                         }
1000                 }
1001         }
1002
1003         if (!err) {
1004                 /*
1005                  * For a PCI bus with no UBOX device, find the next bus
1006                  * that has a UBOX device and use its mapping.
1007                  */
1008                 i = -1;
1009                 for (bus = 255; bus >= 0; bus--) {
1010                         if (pcibus_to_physid[bus] >= 0)
1011                                 i = pcibus_to_physid[bus];
1012                         else
1013                                 pcibus_to_physid[bus] = i;
1014                 }
1015         }
1016
1017         if (ubox_dev)
1018                 pci_dev_put(ubox_dev);
1019
1020         return err ? pcibios_err_to_errno(err) : 0;
1021 }
1022 /* end of Sandy Bridge-EP uncore support */
1023
1024 /* IvyTown uncore support */
1025 static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
1026 {
1027         unsigned msr = uncore_msr_box_ctl(box);
1028         if (msr)
1029                 wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
1030 }
1031
1032 static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
1033 {
1034         struct pci_dev *pdev = box->pci_dev;
1035
1036         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
1037 }
1038
1039 #define IVT_UNCORE_MSR_OPS_COMMON_INIT()                        \
1040         .init_box       = ivt_uncore_msr_init_box,              \
1041         .disable_box    = snbep_uncore_msr_disable_box,         \
1042         .enable_box     = snbep_uncore_msr_enable_box,          \
1043         .disable_event  = snbep_uncore_msr_disable_event,       \
1044         .enable_event   = snbep_uncore_msr_enable_event,        \
1045         .read_counter   = uncore_msr_read_counter
1046
1047 static struct intel_uncore_ops ivt_uncore_msr_ops = {
1048         IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1049 };
1050
1051 static struct intel_uncore_ops ivt_uncore_pci_ops = {
1052         .init_box       = ivt_uncore_pci_init_box,
1053         .disable_box    = snbep_uncore_pci_disable_box,
1054         .enable_box     = snbep_uncore_pci_enable_box,
1055         .disable_event  = snbep_uncore_pci_disable_event,
1056         .enable_event   = snbep_uncore_pci_enable_event,
1057         .read_counter   = snbep_uncore_pci_read_counter,
1058 };
1059
1060 #define IVT_UNCORE_PCI_COMMON_INIT()                            \
1061         .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
1062         .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
1063         .event_mask     = IVT_PMON_RAW_EVENT_MASK,              \
1064         .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
1065         .ops            = &ivt_uncore_pci_ops,                  \
1066         .format_group   = &ivt_uncore_format_group
1067
1068 static struct attribute *ivt_uncore_formats_attr[] = {
1069         &format_attr_event.attr,
1070         &format_attr_umask.attr,
1071         &format_attr_edge.attr,
1072         &format_attr_inv.attr,
1073         &format_attr_thresh8.attr,
1074         NULL,
1075 };
1076
1077 static struct attribute *ivt_uncore_ubox_formats_attr[] = {
1078         &format_attr_event.attr,
1079         &format_attr_umask.attr,
1080         &format_attr_edge.attr,
1081         &format_attr_inv.attr,
1082         &format_attr_thresh5.attr,
1083         NULL,
1084 };
1085
1086 static struct attribute *ivt_uncore_cbox_formats_attr[] = {
1087         &format_attr_event.attr,
1088         &format_attr_umask.attr,
1089         &format_attr_edge.attr,
1090         &format_attr_tid_en.attr,
1091         &format_attr_thresh8.attr,
1092         &format_attr_filter_tid.attr,
1093         &format_attr_filter_link.attr,
1094         &format_attr_filter_state2.attr,
1095         &format_attr_filter_nid2.attr,
1096         &format_attr_filter_opc2.attr,
1097         NULL,
1098 };
1099
1100 static struct attribute *ivt_uncore_pcu_formats_attr[] = {
1101         &format_attr_event_ext.attr,
1102         &format_attr_occ_sel.attr,
1103         &format_attr_edge.attr,
1104         &format_attr_thresh5.attr,
1105         &format_attr_occ_invert.attr,
1106         &format_attr_occ_edge.attr,
1107         &format_attr_filter_band0.attr,
1108         &format_attr_filter_band1.attr,
1109         &format_attr_filter_band2.attr,
1110         &format_attr_filter_band3.attr,
1111         NULL,
1112 };
1113
1114 static struct attribute *ivt_uncore_qpi_formats_attr[] = {
1115         &format_attr_event_ext.attr,
1116         &format_attr_umask.attr,
1117         &format_attr_edge.attr,
1118         &format_attr_thresh8.attr,
1119         &format_attr_match_rds.attr,
1120         &format_attr_match_rnid30.attr,
1121         &format_attr_match_rnid4.attr,
1122         &format_attr_match_dnid.attr,
1123         &format_attr_match_mc.attr,
1124         &format_attr_match_opc.attr,
1125         &format_attr_match_vnw.attr,
1126         &format_attr_match0.attr,
1127         &format_attr_match1.attr,
1128         &format_attr_mask_rds.attr,
1129         &format_attr_mask_rnid30.attr,
1130         &format_attr_mask_rnid4.attr,
1131         &format_attr_mask_dnid.attr,
1132         &format_attr_mask_mc.attr,
1133         &format_attr_mask_opc.attr,
1134         &format_attr_mask_vnw.attr,
1135         &format_attr_mask0.attr,
1136         &format_attr_mask1.attr,
1137         NULL,
1138 };
1139
1140 static struct attribute_group ivt_uncore_format_group = {
1141         .name = "format",
1142         .attrs = ivt_uncore_formats_attr,
1143 };
1144
1145 static struct attribute_group ivt_uncore_ubox_format_group = {
1146         .name = "format",
1147         .attrs = ivt_uncore_ubox_formats_attr,
1148 };
1149
1150 static struct attribute_group ivt_uncore_cbox_format_group = {
1151         .name = "format",
1152         .attrs = ivt_uncore_cbox_formats_attr,
1153 };
1154
1155 static struct attribute_group ivt_uncore_pcu_format_group = {
1156         .name = "format",
1157         .attrs = ivt_uncore_pcu_formats_attr,
1158 };
1159
1160 static struct attribute_group ivt_uncore_qpi_format_group = {
1161         .name = "format",
1162         .attrs = ivt_uncore_qpi_formats_attr,
1163 };
1164
1165 static struct intel_uncore_type ivt_uncore_ubox = {
1166         .name           = "ubox",
1167         .num_counters   = 2,
1168         .num_boxes      = 1,
1169         .perf_ctr_bits  = 44,
1170         .fixed_ctr_bits = 48,
1171         .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
1172         .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
1173         .event_mask     = IVT_U_MSR_PMON_RAW_EVENT_MASK,
1174         .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1175         .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1176         .ops            = &ivt_uncore_msr_ops,
1177         .format_group   = &ivt_uncore_ubox_format_group,
1178 };
1179
1180 static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1181         SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1182                                   SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1183         SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1184         SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1185         SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1186         SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1187         SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1188         SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1189         SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1190         SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1191         SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1192         SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1193         SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1194         SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1195         SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1196         SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1197         SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1198         SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1199         SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1200         SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1201         SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1202         SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1203         SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1204         SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1205         SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1206         SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1207         SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1208         SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1209         SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1210         SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1211         SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1212         SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1213         SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1214         SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1215         SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1216         SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1217         SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1218         EVENT_EXTRA_END
1219 };
1220
1221 static u64 ivt_cbox_filter_mask(int fields)
1222 {
1223         u64 mask = 0;
1224
1225         if (fields & 0x1)
1226                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
1227         if (fields & 0x2)
1228                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
1229         if (fields & 0x4)
1230                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
1231         if (fields & 0x8)
1232                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
1233         if (fields & 0x10)
1234                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;
1235
1236         return mask;
1237 }
1238
1239 static struct event_constraint *
1240 ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1241 {
1242         return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
1243 }
1244
1245 static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1246 {
1247         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1248         struct extra_reg *er;
1249         int idx = 0;
1250
1251         for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
1252                 if (er->event != (event->hw.config & er->config_mask))
1253                         continue;
1254                 idx |= er->idx;
1255         }
1256
1257         if (idx) {
1258                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1259                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1260                 reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
1261                 reg1->idx = idx;
1262         }
1263         return 0;
1264 }
1265
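/*
 * The IvyTown C-box filter is 64 bits wide and split across two MSRs;
 * the upper half goes to a second filter register six MSRs above the
 * first (hence the reg + 6 below).
 */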
1266 static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1267 {
1268         struct hw_perf_event *hwc = &event->hw;
1269         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1270
1271         if (reg1->idx != EXTRA_REG_NONE) {
1272                 u64 filter = uncore_shared_reg_config(box, 0);
1273                 wrmsrl(reg1->reg, filter & 0xffffffff);
1274                 wrmsrl(reg1->reg + 6, filter >> 32);
1275         }
1276
1277         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1278 }
1279
1280 static struct intel_uncore_ops ivt_uncore_cbox_ops = {
1281         .init_box               = ivt_uncore_msr_init_box,
1282         .disable_box            = snbep_uncore_msr_disable_box,
1283         .enable_box             = snbep_uncore_msr_enable_box,
1284         .disable_event          = snbep_uncore_msr_disable_event,
1285         .enable_event           = ivt_cbox_enable_event,
1286         .read_counter           = uncore_msr_read_counter,
1287         .hw_config              = ivt_cbox_hw_config,
1288         .get_constraint         = ivt_cbox_get_constraint,
1289         .put_constraint         = snbep_cbox_put_constraint,
1290 };
1291
1292 static struct intel_uncore_type ivt_uncore_cbox = {
1293         .name                   = "cbox",
1294         .num_counters           = 4,
1295         .num_boxes              = 15,
1296         .perf_ctr_bits          = 44,
1297         .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
1298         .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
1299         .event_mask             = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
1300         .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
1301         .msr_offset             = SNBEP_CBO_MSR_OFFSET,
1302         .num_shared_regs        = 1,
1303         .constraints            = snbep_uncore_cbox_constraints,
1304         .ops                    = &ivt_uncore_cbox_ops,
1305         .format_group           = &ivt_uncore_cbox_format_group,
1306 };
1307
1308 static struct intel_uncore_ops ivt_uncore_pcu_ops = {
1309         IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1310         .hw_config              = snbep_pcu_hw_config,
1311         .get_constraint         = snbep_pcu_get_constraint,
1312         .put_constraint         = snbep_pcu_put_constraint,
1313 };
1314
1315 static struct intel_uncore_type ivt_uncore_pcu = {
1316         .name                   = "pcu",
1317         .num_counters           = 4,
1318         .num_boxes              = 1,
1319         .perf_ctr_bits          = 48,
1320         .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
1321         .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
1322         .event_mask             = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
1323         .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
1324         .num_shared_regs        = 1,
1325         .ops                    = &ivt_uncore_pcu_ops,
1326         .format_group           = &ivt_uncore_pcu_format_group,
1327 };
1328
1329 static struct intel_uncore_type *ivt_msr_uncores[] = {
1330         &ivt_uncore_ubox,
1331         &ivt_uncore_cbox,
1332         &ivt_uncore_pcu,
1333         NULL,
1334 };
1335
1336 static struct intel_uncore_type ivt_uncore_ha = {
1337         .name           = "ha",
1338         .num_counters   = 4,
1339         .num_boxes      = 2,
1340         .perf_ctr_bits  = 48,
1341         IVT_UNCORE_PCI_COMMON_INIT(),
1342 };
1343
1344 static struct intel_uncore_type ivt_uncore_imc = {
1345         .name           = "imc",
1346         .num_counters   = 4,
1347         .num_boxes      = 8,
1348         .perf_ctr_bits  = 48,
1349         .fixed_ctr_bits = 48,
1350         .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1351         .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1352         IVT_UNCORE_PCI_COMMON_INIT(),
1353 };
1354
1355 /* registers in IRP boxes are not properly aligned */
1356 static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1357 static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1358
1359 static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1360 {
1361         struct pci_dev *pdev = box->pci_dev;
1362         struct hw_perf_event *hwc = &event->hw;
1363
1364         pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx],
1365                                hwc->config | SNBEP_PMON_CTL_EN);
1366 }
1367
1368 static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1369 {
1370         struct pci_dev *pdev = box->pci_dev;
1371         struct hw_perf_event *hwc = &event->hw;
1372
1373         pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config);
1374 }
1375
1376 static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1377 {
1378         struct pci_dev *pdev = box->pci_dev;
1379         struct hw_perf_event *hwc = &event->hw;
1380         u64 count = 0;
1381
1382         pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1383         pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1384
1385         return count;
1386 }
1387
1388 static struct intel_uncore_ops ivt_uncore_irp_ops = {
1389         .init_box       = ivt_uncore_pci_init_box,
1390         .disable_box    = snbep_uncore_pci_disable_box,
1391         .enable_box     = snbep_uncore_pci_enable_box,
1392         .disable_event  = ivt_uncore_irp_disable_event,
1393         .enable_event   = ivt_uncore_irp_enable_event,
1394         .read_counter   = ivt_uncore_irp_read_counter,
1395 };
1396
1397 static struct intel_uncore_type ivt_uncore_irp = {
1398         .name                   = "irp",
1399         .num_counters           = 4,
1400         .num_boxes              = 1,
1401         .perf_ctr_bits          = 48,
1402         .event_mask             = IVT_PMON_RAW_EVENT_MASK,
1403         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
1404         .ops                    = &ivt_uncore_irp_ops,
1405         .format_group           = &ivt_uncore_format_group,
1406 };
1407
1408 static struct intel_uncore_ops ivt_uncore_qpi_ops = {
1409         .init_box       = ivt_uncore_pci_init_box,
1410         .disable_box    = snbep_uncore_pci_disable_box,
1411         .enable_box     = snbep_uncore_pci_enable_box,
1412         .disable_event  = snbep_uncore_pci_disable_event,
1413         .enable_event   = snbep_qpi_enable_event,
1414         .read_counter   = snbep_uncore_pci_read_counter,
1415         .hw_config      = snbep_qpi_hw_config,
1416         .get_constraint = uncore_get_constraint,
1417         .put_constraint = uncore_put_constraint,
1418 };
1419
1420 static struct intel_uncore_type ivt_uncore_qpi = {
1421         .name                   = "qpi",
1422         .num_counters           = 4,
1423         .num_boxes              = 3,
1424         .perf_ctr_bits          = 48,
1425         .perf_ctr               = SNBEP_PCI_PMON_CTR0,
1426         .event_ctl              = SNBEP_PCI_PMON_CTL0,
1427         .event_mask             = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
1428         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
1429         .num_shared_regs        = 1,
1430         .ops                    = &ivt_uncore_qpi_ops,
1431         .format_group           = &ivt_uncore_qpi_format_group,
1432 };
1433
1434 static struct intel_uncore_type ivt_uncore_r2pcie = {
1435         .name           = "r2pcie",
1436         .num_counters   = 4,
1437         .num_boxes      = 1,
1438         .perf_ctr_bits  = 44,
1439         .constraints    = snbep_uncore_r2pcie_constraints,
1440         IVT_UNCORE_PCI_COMMON_INIT(),
1441 };
1442
1443 static struct intel_uncore_type ivt_uncore_r3qpi = {
1444         .name           = "r3qpi",
1445         .num_counters   = 3,
1446         .num_boxes      = 2,
1447         .perf_ctr_bits  = 44,
1448         .constraints    = snbep_uncore_r3qpi_constraints,
1449         IVT_UNCORE_PCI_COMMON_INIT(),
1450 };
1451
1452 enum {
1453         IVT_PCI_UNCORE_HA,
1454         IVT_PCI_UNCORE_IMC,
1455         IVT_PCI_UNCORE_IRP,
1456         IVT_PCI_UNCORE_QPI,
1457         IVT_PCI_UNCORE_R2PCIE,
1458         IVT_PCI_UNCORE_R3QPI,
1459 };
1460
1461 static struct intel_uncore_type *ivt_pci_uncores[] = {
1462         [IVT_PCI_UNCORE_HA]     = &ivt_uncore_ha,
1463         [IVT_PCI_UNCORE_IMC]    = &ivt_uncore_imc,
1464         [IVT_PCI_UNCORE_IRP]    = &ivt_uncore_irp,
1465         [IVT_PCI_UNCORE_QPI]    = &ivt_uncore_qpi,
1466         [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
1467         [IVT_PCI_UNCORE_R3QPI]  = &ivt_uncore_r3qpi,
1468         NULL,
1469 };
1470
1471 static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1472         { /* Home Agent 0 */
1473                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1474                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
1475         },
1476         { /* Home Agent 1 */
1477                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1478                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
1479         },
1480         { /* MC0 Channel 0 */
1481                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1482                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
1483         },
1484         { /* MC0 Channel 1 */
1485                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1486                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
1487         },
1488         { /* MC0 Channel 3 */
1489                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1490                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
1491         },
1492         { /* MC0 Channel 4 */
1493                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1494                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
1495         },
1496         { /* MC1 Channel 0 */
1497                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1498                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
1499         },
1500         { /* MC1 Channel 1 */
1501                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1502                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
1503         },
1504         { /* MC1 Channel 3 */
1505                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1506                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
1507         },
1508         { /* MC1 Channel 4 */
1509                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1510                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
1511         },
1512         { /* IRP */
1513                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1514                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0),
1515         },
1516         { /* QPI0 Port 0 */
1517                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1518                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
1519         },
1520         { /* QPI0 Port 1 */
1521                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1522                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
1523         },
1524         { /* QPI1 Port 2 */
1525                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1526                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
1527         },
1528         { /* R2PCIe */
1529                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1530                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
1531         },
1532         { /* R3QPI0 Link 0 */
1533                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1534                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
1535         },
1536         { /* R3QPI0 Link 1 */
1537                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1538                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
1539         },
1540         { /* R3QPI1 Link 2 */
1541                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1542                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
1543         },
1544         { /* QPI Port 0 filter  */
1545                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1546                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1547                                                    SNBEP_PCI_QPI_PORT0_FILTER),
1548         },
1549         { /* QPI Port 1 filter  */
1550                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1551                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1552                                                    SNBEP_PCI_QPI_PORT1_FILTER),
1553         },
1554         { /* end: all zeroes */ }
1555 };
1556
1557 static struct pci_driver ivt_uncore_pci_driver = {
1558         .name           = "ivt_uncore",
1559         .id_table       = ivt_uncore_pci_ids,
1560 };
1561 /* end of IvyTown uncore support */
1562
1563 /* Sandy Bridge uncore support */
1564 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1565 {
1566         struct hw_perf_event *hwc = &event->hw;
1567
1568         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1569                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1570         else
1571                 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
1572 }
1573
1574 static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1575 {
1576         wrmsrl(event->hw.config_base, 0);
1577 }
1578
1579 static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
1580 {
1581         if (box->pmu->pmu_idx == 0) {
1582                 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
1583                         SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
1584         }
1585 }
1586
1587 static struct uncore_event_desc snb_uncore_events[] = {
1588         INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
1589         { /* end: all zeroes */ },
1590 };
1591
1592 static struct attribute *snb_uncore_formats_attr[] = {
1593         &format_attr_event.attr,
1594         &format_attr_umask.attr,
1595         &format_attr_edge.attr,
1596         &format_attr_inv.attr,
1597         &format_attr_cmask5.attr,
1598         NULL,
1599 };
1600
1601 static struct attribute_group snb_uncore_format_group = {
1602         .name           = "format",
1603         .attrs          = snb_uncore_formats_attr,
1604 };
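
/*
 * Illustrative only: the format group above describes how a raw SNB
 * uncore config is assembled from the sysfs format strings.  For
 * example, an event requested with event=0x80,umask=0x01 packs into
 * config = (0x01 << 8) | 0x80 = 0x180, with the event code in
 * config:0-7 and the umask in config:8-15.  From userspace this would
 * typically be asked for as something like
 *
 *   perf stat -a -e 'uncore_cbox_0/event=0x80,umask=0x01/' -- sleep 1
 *
 * where the "uncore_cbox_0" PMU name and the event code are examples,
 * not guarantees about a particular machine.
 */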
1605
1606 static struct intel_uncore_ops snb_uncore_msr_ops = {
1607         .init_box       = snb_uncore_msr_init_box,
1608         .disable_event  = snb_uncore_msr_disable_event,
1609         .enable_event   = snb_uncore_msr_enable_event,
1610         .read_counter   = uncore_msr_read_counter,
1611 };
1612
1613 static struct event_constraint snb_uncore_cbox_constraints[] = {
1614         UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
1615         UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
1616         EVENT_CONSTRAINT_END
1617 };
1618
1619 static struct intel_uncore_type snb_uncore_cbox = {
1620         .name           = "cbox",
1621         .num_counters   = 2,
1622         .num_boxes      = 4,
1623         .perf_ctr_bits  = 44,
1624         .fixed_ctr_bits = 48,
1625         .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
1626         .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
1627         .fixed_ctr      = SNB_UNC_FIXED_CTR,
1628         .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
1629         .single_fixed   = 1,
1630         .event_mask     = SNB_UNC_RAW_EVENT_MASK,
1631         .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
1632         .constraints    = snb_uncore_cbox_constraints,
1633         .ops            = &snb_uncore_msr_ops,
1634         .format_group   = &snb_uncore_format_group,
1635         .event_descs    = snb_uncore_events,
1636 };
1637
1638 static struct intel_uncore_type *snb_msr_uncores[] = {
1639         &snb_uncore_cbox,
1640         NULL,
1641 };
1642 /* end of Sandy Bridge uncore support */
1643
1644 /* Nehalem uncore support */
1645 static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
1646 {
1647         wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
1648 }
1649
1650 static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
1651 {
1652         wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
1653 }
1654
1655 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1656 {
1657         struct hw_perf_event *hwc = &event->hw;
1658
1659         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1660                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1661         else
1662                 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
1663 }
1664
1665 static struct attribute *nhm_uncore_formats_attr[] = {
1666         &format_attr_event.attr,
1667         &format_attr_umask.attr,
1668         &format_attr_edge.attr,
1669         &format_attr_inv.attr,
1670         &format_attr_cmask8.attr,
1671         NULL,
1672 };
1673
1674 static struct attribute_group nhm_uncore_format_group = {
1675         .name = "format",
1676         .attrs = nhm_uncore_formats_attr,
1677 };
1678
1679 static struct uncore_event_desc nhm_uncore_events[] = {
1680         INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
1681         INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
1682         INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
1683         INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
1684         INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
1685         INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
1686         INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
1687         INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
1688         INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
1689         { /* end: all zeroes */ },
1690 };
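
/*
 * Illustrative only: the aliases above expand into the raw fields
 * declared by nhm_uncore_format_group, so e.g. qmc_writes_full_any
 * ("event=0x2f,umask=0x0f") becomes config = (0x0f << 8) | 0x2f =
 * 0x0f2f.  The alias is only a convenience exported under events/ in
 * sysfs; the same counter can be programmed by spelling out the
 * fields by hand.
 */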
1691
1692 static struct intel_uncore_ops nhm_uncore_msr_ops = {
1693         .disable_box    = nhm_uncore_msr_disable_box,
1694         .enable_box     = nhm_uncore_msr_enable_box,
1695         .disable_event  = snb_uncore_msr_disable_event,
1696         .enable_event   = nhm_uncore_msr_enable_event,
1697         .read_counter   = uncore_msr_read_counter,
1698 };
1699
1700 static struct intel_uncore_type nhm_uncore = {
1701         .name           = "",
1702         .num_counters   = 8,
1703         .num_boxes      = 1,
1704         .perf_ctr_bits  = 48,
1705         .fixed_ctr_bits = 48,
1706         .event_ctl      = NHM_UNC_PERFEVTSEL0,
1707         .perf_ctr       = NHM_UNC_UNCORE_PMC0,
1708         .fixed_ctr      = NHM_UNC_FIXED_CTR,
1709         .fixed_ctl      = NHM_UNC_FIXED_CTR_CTRL,
1710         .event_mask     = NHM_UNC_RAW_EVENT_MASK,
1711         .event_descs    = nhm_uncore_events,
1712         .ops            = &nhm_uncore_msr_ops,
1713         .format_group   = &nhm_uncore_format_group,
1714 };
1715
1716 static struct intel_uncore_type *nhm_msr_uncores[] = {
1717         &nhm_uncore,
1718         NULL,
1719 };
1720 /* end of Nehalem uncore support */
1721
1722 /* Nehalem-EX uncore support */
1723 DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
1724 DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
1725 DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
1726 DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
1727
1728 static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
1729 {
1730         wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
1731 }
1732
1733 static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
1734 {
1735         unsigned msr = uncore_msr_box_ctl(box);
1736         u64 config;
1737
1738         if (msr) {
1739                 rdmsrl(msr, config);
1740                 config &= ~((1ULL << uncore_num_counters(box)) - 1);
1741                 /* WBox has a fixed counter */
1742                 if (uncore_msr_fixed_ctl(box))
1743                         config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
1744                 wrmsrl(msr, config);
1745         }
1746 }
1747
1748 static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
1749 {
1750         unsigned msr = uncore_msr_box_ctl(box);
1751         u64 config;
1752
1753         if (msr) {
1754                 rdmsrl(msr, config);
1755                 config |= (1ULL << uncore_num_counters(box)) - 1;
1756                 /* WBox has a fixed counter */
1757                 if (uncore_msr_fixed_ctl(box))
1758                         config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
1759                 wrmsrl(msr, config);
1760         }
1761 }
1762
1763 static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1764 {
1765         wrmsrl(event->hw.config_base, 0);
1766 }
1767
1768 static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1769 {
1770         struct hw_perf_event *hwc = &event->hw;
1771
1772         if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
1773                 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
1774         else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
1775                 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1776         else
1777                 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
1778 }
1779
1780 #define NHMEX_UNCORE_OPS_COMMON_INIT()                          \
1781         .init_box       = nhmex_uncore_msr_init_box,            \
1782         .disable_box    = nhmex_uncore_msr_disable_box,         \
1783         .enable_box     = nhmex_uncore_msr_enable_box,          \
1784         .disable_event  = nhmex_uncore_msr_disable_event,       \
1785         .read_counter   = uncore_msr_read_counter
1786
1787 static struct intel_uncore_ops nhmex_uncore_ops = {
1788         NHMEX_UNCORE_OPS_COMMON_INIT(),
1789         .enable_event   = nhmex_uncore_msr_enable_event,
1790 };
1791
1792 static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
1793         &format_attr_event.attr,
1794         &format_attr_edge.attr,
1795         NULL,
1796 };
1797
1798 static struct attribute_group nhmex_uncore_ubox_format_group = {
1799         .name           = "format",
1800         .attrs          = nhmex_uncore_ubox_formats_attr,
1801 };
1802
1803 static struct intel_uncore_type nhmex_uncore_ubox = {
1804         .name           = "ubox",
1805         .num_counters   = 1,
1806         .num_boxes      = 1,
1807         .perf_ctr_bits  = 48,
1808         .event_ctl      = NHMEX_U_MSR_PMON_EV_SEL,
1809         .perf_ctr       = NHMEX_U_MSR_PMON_CTR,
1810         .event_mask     = NHMEX_U_PMON_RAW_EVENT_MASK,
1811         .box_ctl        = NHMEX_U_MSR_PMON_GLOBAL_CTL,
1812         .ops            = &nhmex_uncore_ops,
1813         .format_group   = &nhmex_uncore_ubox_format_group
1814 };
1815
1816 static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
1817         &format_attr_event.attr,
1818         &format_attr_umask.attr,
1819         &format_attr_edge.attr,
1820         &format_attr_inv.attr,
1821         &format_attr_thresh8.attr,
1822         NULL,
1823 };
1824
1825 static struct attribute_group nhmex_uncore_cbox_format_group = {
1826         .name = "format",
1827         .attrs = nhmex_uncore_cbox_formats_attr,
1828 };
1829
1830 /* msr offset for each instance of cbox */
1831 static unsigned nhmex_cbox_msr_offsets[] = {
1832         0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
1833 };
1834
1835 static struct intel_uncore_type nhmex_uncore_cbox = {
1836         .name                   = "cbox",
1837         .num_counters           = 6,
1838         .num_boxes              = 10,
1839         .perf_ctr_bits          = 48,
1840         .event_ctl              = NHMEX_C0_MSR_PMON_EV_SEL0,
1841         .perf_ctr               = NHMEX_C0_MSR_PMON_CTR0,
1842         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
1843         .box_ctl                = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
1844         .msr_offsets            = nhmex_cbox_msr_offsets,
1845         .pair_ctr_ctl           = 1,
1846         .ops                    = &nhmex_uncore_ops,
1847         .format_group           = &nhmex_uncore_cbox_format_group
1848 };
1849
1850 static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
1851         INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
1852         { /* end: all zeroes */ },
1853 };
1854
1855 static struct intel_uncore_type nhmex_uncore_wbox = {
1856         .name                   = "wbox",
1857         .num_counters           = 4,
1858         .num_boxes              = 1,
1859         .perf_ctr_bits          = 48,
1860         .event_ctl              = NHMEX_W_MSR_PMON_CNT0,
1861         .perf_ctr               = NHMEX_W_MSR_PMON_EVT_SEL0,
1862         .fixed_ctr              = NHMEX_W_MSR_PMON_FIXED_CTR,
1863         .fixed_ctl              = NHMEX_W_MSR_PMON_FIXED_CTL,
1864         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
1865         .box_ctl                = NHMEX_W_MSR_GLOBAL_CTL,
1866         .pair_ctr_ctl           = 1,
1867         .event_descs            = nhmex_uncore_wbox_events,
1868         .ops                    = &nhmex_uncore_ops,
1869         .format_group           = &nhmex_uncore_cbox_format_group
1870 };
1871
1872 static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1873 {
1874         struct hw_perf_event *hwc = &event->hw;
1875         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1876         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1877         int ctr, ev_sel;
1878
1879         ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
1880                 NHMEX_B_PMON_CTR_SHIFT;
1881         ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
1882                   NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
1883
1884         /* events that do not use the match/mask registers */
1885         if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
1886             (ctr == 2 && ev_sel != 0x4) || ctr == 3)
1887                 return 0;
1888
1889         if (box->pmu->pmu_idx == 0)
1890                 reg1->reg = NHMEX_B0_MSR_MATCH;
1891         else
1892                 reg1->reg = NHMEX_B1_MSR_MATCH;
1893         reg1->idx = 0;
1894         reg1->config = event->attr.config1;
1895         reg2->config = event->attr.config2;
1896         return 0;
1897 }
1898
1899 static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1900 {
1901         struct hw_perf_event *hwc = &event->hw;
1902         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1903         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1904
1905         if (reg1->idx != EXTRA_REG_NONE) {
1906                 wrmsrl(reg1->reg, reg1->config);
1907                 wrmsrl(reg1->reg + 1, reg2->config);
1908         }
1909         wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
1910                 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
1911 }
1912
1913 /*
1914  * The Bbox has 4 counters, but each counter monitors different events.
1915  * Use bits 6-7 in the event config to select the counter (example below).
1916  */
1917 static struct event_constraint nhmex_uncore_bbox_constraints[] = {
1918         EVENT_CONSTRAINT(0 , 1, 0xc0),
1919         EVENT_CONSTRAINT(0x40, 2, 0xc0),
1920         EVENT_CONSTRAINT(0x80, 4, 0xc0),
1921         EVENT_CONSTRAINT(0xc0, 8, 0xc0),
1922         EVENT_CONSTRAINT_END,
1923 };
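
/*
 * Worked example (illustrative): EVENT_CONSTRAINT(code, counter_mask,
 * cmask) matches events whose (config & cmask) equals code.  With
 * cmask = 0xc0 the counter-select field in config bits 6-7 is what is
 * compared, so an event programmed with counter = 1 (code 0x40) is
 * restricted to counter mask 0x2, i.e. hardware counter 1, and
 * counter = 3 (code 0xc0) to counter mask 0x8, i.e. counter 3.
 */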
1924
1925 static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
1926         &format_attr_event5.attr,
1927         &format_attr_counter.attr,
1928         &format_attr_match.attr,
1929         &format_attr_mask.attr,
1930         NULL,
1931 };
1932
1933 static struct attribute_group nhmex_uncore_bbox_format_group = {
1934         .name = "format",
1935         .attrs = nhmex_uncore_bbox_formats_attr,
1936 };
1937
1938 static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
1939         NHMEX_UNCORE_OPS_COMMON_INIT(),
1940         .enable_event           = nhmex_bbox_msr_enable_event,
1941         .hw_config              = nhmex_bbox_hw_config,
1942         .get_constraint         = uncore_get_constraint,
1943         .put_constraint         = uncore_put_constraint,
1944 };
1945
1946 static struct intel_uncore_type nhmex_uncore_bbox = {
1947         .name                   = "bbox",
1948         .num_counters           = 4,
1949         .num_boxes              = 2,
1950         .perf_ctr_bits          = 48,
1951         .event_ctl              = NHMEX_B0_MSR_PMON_CTL0,
1952         .perf_ctr               = NHMEX_B0_MSR_PMON_CTR0,
1953         .event_mask             = NHMEX_B_PMON_RAW_EVENT_MASK,
1954         .box_ctl                = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
1955         .msr_offset             = NHMEX_B_MSR_OFFSET,
1956         .pair_ctr_ctl           = 1,
1957         .num_shared_regs        = 1,
1958         .constraints            = nhmex_uncore_bbox_constraints,
1959         .ops                    = &nhmex_uncore_bbox_ops,
1960         .format_group           = &nhmex_uncore_bbox_format_group
1961 };
1962
1963 static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1964 {
1965         struct hw_perf_event *hwc = &event->hw;
1966         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1967         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1968
1969         /* only TO_R_PROG_EV event uses the match/mask register */
1970         if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
1971             NHMEX_S_EVENT_TO_R_PROG_EV)
1972                 return 0;
1973
1974         if (box->pmu->pmu_idx == 0)
1975                 reg1->reg = NHMEX_S0_MSR_MM_CFG;
1976         else
1977                 reg1->reg = NHMEX_S1_MSR_MM_CFG;
1978         reg1->idx = 0;
1979         reg1->config = event->attr.config1;
1980         reg2->config = event->attr.config2;
1981         return 0;
1982 }
1983
1984 static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1985 {
1986         struct hw_perf_event *hwc = &event->hw;
1987         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1988         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1989
1990         if (reg1->idx != EXTRA_REG_NONE) {
1991                 wrmsrl(reg1->reg, 0);
1992                 wrmsrl(reg1->reg + 1, reg1->config);
1993                 wrmsrl(reg1->reg + 2, reg2->config);
1994                 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
1995         }
1996         wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1997 }
1998
1999 static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
2000         &format_attr_event.attr,
2001         &format_attr_umask.attr,
2002         &format_attr_edge.attr,
2003         &format_attr_inv.attr,
2004         &format_attr_thresh8.attr,
2005         &format_attr_match.attr,
2006         &format_attr_mask.attr,
2007         NULL,
2008 };
2009
2010 static struct attribute_group nhmex_uncore_sbox_format_group = {
2011         .name                   = "format",
2012         .attrs                  = nhmex_uncore_sbox_formats_attr,
2013 };
2014
2015 static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
2016         NHMEX_UNCORE_OPS_COMMON_INIT(),
2017         .enable_event           = nhmex_sbox_msr_enable_event,
2018         .hw_config              = nhmex_sbox_hw_config,
2019         .get_constraint         = uncore_get_constraint,
2020         .put_constraint         = uncore_put_constraint,
2021 };
2022
2023 static struct intel_uncore_type nhmex_uncore_sbox = {
2024         .name                   = "sbox",
2025         .num_counters           = 4,
2026         .num_boxes              = 2,
2027         .perf_ctr_bits          = 48,
2028         .event_ctl              = NHMEX_S0_MSR_PMON_CTL0,
2029         .perf_ctr               = NHMEX_S0_MSR_PMON_CTR0,
2030         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
2031         .box_ctl                = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
2032         .msr_offset             = NHMEX_S_MSR_OFFSET,
2033         .pair_ctr_ctl           = 1,
2034         .num_shared_regs        = 1,
2035         .ops                    = &nhmex_uncore_sbox_ops,
2036         .format_group           = &nhmex_uncore_sbox_format_group
2037 };
2038
2039 enum {
2040         EXTRA_REG_NHMEX_M_FILTER,
2041         EXTRA_REG_NHMEX_M_DSP,
2042         EXTRA_REG_NHMEX_M_ISS,
2043         EXTRA_REG_NHMEX_M_MAP,
2044         EXTRA_REG_NHMEX_M_MSC_THR,
2045         EXTRA_REG_NHMEX_M_PGT,
2046         EXTRA_REG_NHMEX_M_PLD,
2047         EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
2048 };
2049
2050 static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
2051         MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
2052         MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
2053         MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
2054         MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
2055         /* event 0xa uses two extra registers */
2056         MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
2057         MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
2058         MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
2059         /* events 0xd ~ 0x10 use the same extra register */
2060         MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
2061         MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
2062         MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
2063         MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
2064         MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
2065         MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
2066         MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
2067         MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
2068         MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
2069         EVENT_EXTRA_END
2070 };
2071
2072 /* Nehalem-EX or Westmere-EX ? */
2073 static bool uncore_nhmex;
2074
2075 static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
2076 {
2077         struct intel_uncore_extra_reg *er;
2078         unsigned long flags;
2079         bool ret = false;
2080         u64 mask;
2081
2082         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2083                 er = &box->shared_regs[idx];
2084                 raw_spin_lock_irqsave(&er->lock, flags);
2085                 if (!atomic_read(&er->ref) || er->config == config) {
2086                         atomic_inc(&er->ref);
2087                         er->config = config;
2088                         ret = true;
2089                 }
2090                 raw_spin_unlock_irqrestore(&er->lock, flags);
2091
2092                 return ret;
2093         }
2094         /*
2095          * The ZDP_CTL_FVC MSR has 4 fields which are used to control
2096          * events 0xd ~ 0x10. Besides these 4 fields, there are additional
2097          * fields which are shared (see the note after this function).
2098          */
2099         idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2100         if (WARN_ON_ONCE(idx >= 4))
2101                 return false;
2102
2103         /* mask of the shared fields */
2104         if (uncore_nhmex)
2105                 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
2106         else
2107                 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
2108         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2109
2110         raw_spin_lock_irqsave(&er->lock, flags);
2111         /* add mask of the non-shared field if it's in use */
2112         if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
2113                 if (uncore_nhmex)
2114                         mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2115                 else
2116                         mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2117         }
2118
2119         if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
2120                 atomic_add(1 << (idx * 8), &er->ref);
2121                 if (uncore_nhmex)
2122                         mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
2123                                 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2124                 else
2125                         mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
2126                                 WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2127                 er->config &= ~mask;
2128                 er->config |= (config & mask);
2129                 ret = true;
2130         }
2131         raw_spin_unlock_irqrestore(&er->lock, flags);
2132
2133         return ret;
2134 }
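
/*
 * Illustrative note on the refcounting above: the single atomic_t in
 * the ZDP_CTL_FVC shared register packs four 8-bit reference counts,
 * one per FVC event field, which is why users are added with
 * atomic_add(1 << (idx * 8), &er->ref) and read back with
 * __BITS_VALUE(atomic_read(&er->ref), idx, 8).  For example, when a
 * new event targets field 2 and byte 2 of er->ref is already
 * non-zero, field 2's event-specific bits are added to the compare
 * mask, so the request is accepted only if it agrees with the
 * existing users in both the shared bits and field 2's bits.
 */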
2135
2136 static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
2137 {
2138         struct intel_uncore_extra_reg *er;
2139
2140         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2141                 er = &box->shared_regs[idx];
2142                 atomic_dec(&er->ref);
2143                 return;
2144         }
2145
2146         idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2147         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2148         atomic_sub(1 << (idx * 8), &er->ref);
2149 }
2150
2151 static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
2152 {
2153         struct hw_perf_event *hwc = &event->hw;
2154         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2155         u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
2156         u64 config = reg1->config;
2157
2158         /* get the non-shared control bits and shift them */
2159         idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2160         if (uncore_nhmex)
2161                 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2162         else
2163                 config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2164         if (new_idx > orig_idx) {
2165                 idx = new_idx - orig_idx;
2166                 config <<= 3 * idx;
2167         } else {
2168                 idx = orig_idx - new_idx;
2169                 config >>= 3 * idx;
2170         }
2171
2172         /* add the shared control bits back */
2173         if (uncore_nhmex)
2174                 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2175         else
2176                 config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2177         config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2178         if (modify) {
2179                 /* adjust the main event selector */
2180                 if (new_idx > orig_idx)
2181                         hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2182                 else
2183                         hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2184                 reg1->config = config;
2185                 reg1->idx = ~0xff | new_idx;
2186         }
2187         return config;
2188 }
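
/*
 * Illustrative example of the shifting above: the four FVC event
 * fields are 3 bits apart, so moving an event from field orig_idx to
 * field new_idx shifts its non-shared control bits by
 * 3 * (new_idx - orig_idx) and adjusts the main event selector
 * (inc_sel) by the same delta; e.g. moving from field 0 (event 0xd)
 * to field 2 (event 0xf) shifts the bits left by 6 and bumps inc_sel
 * by 2.
 */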
2189
2190 static struct event_constraint *
2191 nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2192 {
2193         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2194         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2195         int i, idx[2], alloc = 0;
2196         u64 config1 = reg1->config;
2197
2198         idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
2199         idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
2200 again:
2201         for (i = 0; i < 2; i++) {
2202                 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
2203                         idx[i] = 0xff;
2204
2205                 if (idx[i] == 0xff)
2206                         continue;
2207
2208                 if (!nhmex_mbox_get_shared_reg(box, idx[i],
2209                                 __BITS_VALUE(config1, i, 32)))
2210                         goto fail;
2211                 alloc |= (0x1 << i);
2212         }
2213
2214         /* for the match/mask registers */
2215         if (reg2->idx != EXTRA_REG_NONE &&
2216             (uncore_box_is_fake(box) || !reg2->alloc) &&
2217             !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
2218                 goto fail;
2219
2220         /*
2221          * If it's a fake box -- as used by validate_{group,event}() -- we
2222          * shouldn't touch the event state, and we can avoid doing so since
2223          * both will only call get_event_constraints() once per event; this
2224          * also avoids the need for reg->alloc.
2225          */
2226         if (!uncore_box_is_fake(box)) {
2227                 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
2228                         nhmex_mbox_alter_er(event, idx[0], true);
2229                 reg1->alloc |= alloc;
2230                 if (reg2->idx != EXTRA_REG_NONE)
2231                         reg2->alloc = 1;
2232         }
2233         return NULL;
2234 fail:
2235         if (idx[0] != 0xff && !(alloc & 0x1) &&
2236             idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2237                 /*
2238                  * events 0xd ~ 0x10 are functionally identical, but are
2239                  * controlled by different fields in the ZDP_CTL_FVC
2240                  * register. If we failed to take one field, try the
2241                  * other 3 choices.
2242                  */
2243                 BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
2244                 idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2245                 idx[0] = (idx[0] + 1) % 4;
2246                 idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2247                 if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
2248                         config1 = nhmex_mbox_alter_er(event, idx[0], false);
2249                         goto again;
2250                 }
2251         }
2252
2253         if (alloc & 0x1)
2254                 nhmex_mbox_put_shared_reg(box, idx[0]);
2255         if (alloc & 0x2)
2256                 nhmex_mbox_put_shared_reg(box, idx[1]);
2257         return &constraint_empty;
2258 }
2259
2260 static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2261 {
2262         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2263         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2264
2265         if (uncore_box_is_fake(box))
2266                 return;
2267
2268         if (reg1->alloc & 0x1)
2269                 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
2270         if (reg1->alloc & 0x2)
2271                 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
2272         reg1->alloc = 0;
2273
2274         if (reg2->alloc) {
2275                 nhmex_mbox_put_shared_reg(box, reg2->idx);
2276                 reg2->alloc = 0;
2277         }
2278 }
2279
2280 static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
2281 {
2282         if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2283                 return er->idx;
2284         return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
2285 }
2286
2287 static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2288 {
2289         struct intel_uncore_type *type = box->pmu->type;
2290         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2291         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2292         struct extra_reg *er;
2293         unsigned msr;
2294         int reg_idx = 0;
2295         /*
2296          * The mbox events may require at most 2 extra MSRs. But only the
2297          * lower 32 bits in these MSRs are significant, so we can use config1
2298          * to pass both MSRs' configs (a worked example follows this function).
2299          */
2300         for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
2301                 if (er->event != (event->hw.config & er->config_mask))
2302                         continue;
2303                 if (event->attr.config1 & ~er->valid_mask)
2304                         return -EINVAL;
2305
2306                 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
2307                 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
2308                         return -EINVAL;
2309
2310                 /* always use bits 32~63 to pass the PLD config */
2311                 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
2312                         reg_idx = 1;
2313                 else if (WARN_ON_ONCE(reg_idx > 0))
2314                         return -EINVAL;
2315
2316                 reg1->idx &= ~(0xff << (reg_idx * 8));
2317                 reg1->reg &= ~(0xffff << (reg_idx * 16));
2318                 reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
2319                 reg1->reg |= msr << (reg_idx * 16);
2320                 reg1->config = event->attr.config1;
2321                 reg_idx++;
2322         }
2323         /*
2324          * The mbox only provides the ability to perform address matching
2325          * for the PLD events.
2326          */
2327         if (reg_idx == 2) {
2328                 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
2329                 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
2330                         reg2->config = event->attr.config2;
2331                 else
2332                         reg2->config = ~0ULL;
2333                 if (box->pmu->pmu_idx == 0)
2334                         reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
2335                 else
2336                         reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
2337         }
2338         return 0;
2339 }
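
/*
 * Worked example (illustrative) of the packing done above: an mbox
 * event that needs both an ISS and a PLD extra register ends up with
 *
 *   reg1->idx:  byte 0 = ISS extra-reg index, byte 1 = PLD index
 *   reg1->reg:  bits 0-15 = ISS MSR address, bits 16-31 = PLD MSR
 *   config1:    bits 0-31 = ISS config, bits 32-63 = PLD config
 *
 * which is why the PLD entry is forced into reg_idx 1 and the "pld"
 * format attribute further below is declared as config1:32-63.
 */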
2340
2341 static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
2342 {
2343         struct intel_uncore_extra_reg *er;
2344         unsigned long flags;
2345         u64 config;
2346
2347         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2348                 return box->shared_regs[idx].config;
2349
2350         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2351         raw_spin_lock_irqsave(&er->lock, flags);
2352         config = er->config;
2353         raw_spin_unlock_irqrestore(&er->lock, flags);
2354         return config;
2355 }
2356
2357 static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2358 {
2359         struct hw_perf_event *hwc = &event->hw;
2360         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2361         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2362         int idx;
2363
2364         idx = __BITS_VALUE(reg1->idx, 0, 8);
2365         if (idx != 0xff)
2366                 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
2367                         nhmex_mbox_shared_reg_config(box, idx));
2368         idx = __BITS_VALUE(reg1->idx, 1, 8);
2369         if (idx != 0xff)
2370                 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
2371                         nhmex_mbox_shared_reg_config(box, idx));
2372
2373         if (reg2->idx != EXTRA_REG_NONE) {
2374                 wrmsrl(reg2->reg, 0);
2375                 if (reg2->config != ~0ULL) {
2376                         wrmsrl(reg2->reg + 1,
2377                                 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
2378                         wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
2379                                 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
2380                         wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
2381                 }
2382         }
2383
2384         wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
2385 }
2386
2387 DEFINE_UNCORE_FORMAT_ATTR(count_mode,           count_mode,     "config:2-3");
2388 DEFINE_UNCORE_FORMAT_ATTR(storage_mode,         storage_mode,   "config:4-5");
2389 DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,            wrap_mode,      "config:6");
2390 DEFINE_UNCORE_FORMAT_ATTR(flag_mode,            flag_mode,      "config:7");
2391 DEFINE_UNCORE_FORMAT_ATTR(inc_sel,              inc_sel,        "config:9-13");
2392 DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,         set_flag_sel,   "config:19-21");
2393 DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en,        filter_cfg_en,  "config2:63");
2394 DEFINE_UNCORE_FORMAT_ATTR(filter_match,         filter_match,   "config2:0-33");
2395 DEFINE_UNCORE_FORMAT_ATTR(filter_mask,          filter_mask,    "config2:34-61");
2396 DEFINE_UNCORE_FORMAT_ATTR(dsp,                  dsp,            "config1:0-31");
2397 DEFINE_UNCORE_FORMAT_ATTR(thr,                  thr,            "config1:0-31");
2398 DEFINE_UNCORE_FORMAT_ATTR(fvc,                  fvc,            "config1:0-31");
2399 DEFINE_UNCORE_FORMAT_ATTR(pgt,                  pgt,            "config1:0-31");
2400 DEFINE_UNCORE_FORMAT_ATTR(map,                  map,            "config1:0-31");
2401 DEFINE_UNCORE_FORMAT_ATTR(iss,                  iss,            "config1:0-31");
2402 DEFINE_UNCORE_FORMAT_ATTR(pld,                  pld,            "config1:32-63");
2403
2404 static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
2405         &format_attr_count_mode.attr,
2406         &format_attr_storage_mode.attr,
2407         &format_attr_wrap_mode.attr,
2408         &format_attr_flag_mode.attr,
2409         &format_attr_inc_sel.attr,
2410         &format_attr_set_flag_sel.attr,
2411         &format_attr_filter_cfg_en.attr,
2412         &format_attr_filter_match.attr,
2413         &format_attr_filter_mask.attr,
2414         &format_attr_dsp.attr,
2415         &format_attr_thr.attr,
2416         &format_attr_fvc.attr,
2417         &format_attr_pgt.attr,
2418         &format_attr_map.attr,
2419         &format_attr_iss.attr,
2420         &format_attr_pld.attr,
2421         NULL,
2422 };
2423
2424 static struct attribute_group nhmex_uncore_mbox_format_group = {
2425         .name           = "format",
2426         .attrs          = nhmex_uncore_mbox_formats_attr,
2427 };
2428
2429 static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
2430         INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
2431         INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
2432         { /* end: all zeroes */ },
2433 };
2434
2435 static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
2436         INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
2437         INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
2438         { /* end: all zeroes */ },
2439 };
2440
2441 static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
2442         NHMEX_UNCORE_OPS_COMMON_INIT(),
2443         .enable_event   = nhmex_mbox_msr_enable_event,
2444         .hw_config      = nhmex_mbox_hw_config,
2445         .get_constraint = nhmex_mbox_get_constraint,
2446         .put_constraint = nhmex_mbox_put_constraint,
2447 };
2448
2449 static struct intel_uncore_type nhmex_uncore_mbox = {
2450         .name                   = "mbox",
2451         .num_counters           = 6,
2452         .num_boxes              = 2,
2453         .perf_ctr_bits          = 48,
2454         .event_ctl              = NHMEX_M0_MSR_PMU_CTL0,
2455         .perf_ctr               = NHMEX_M0_MSR_PMU_CNT0,
2456         .event_mask             = NHMEX_M_PMON_RAW_EVENT_MASK,
2457         .box_ctl                = NHMEX_M0_MSR_GLOBAL_CTL,
2458         .msr_offset             = NHMEX_M_MSR_OFFSET,
2459         .pair_ctr_ctl           = 1,
2460         .num_shared_regs        = 8,
2461         .event_descs            = nhmex_uncore_mbox_events,
2462         .ops                    = &nhmex_uncore_mbox_ops,
2463         .format_group           = &nhmex_uncore_mbox_format_group,
2464 };
2465
2466 static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
2467 {
2468         struct hw_perf_event *hwc = &event->hw;
2469         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2470
2471         /* adjust the main event selector and extra register index */
2472         if (reg1->idx % 2) {
2473                 reg1->idx--;
2474                 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2475         } else {
2476                 reg1->idx++;
2477                 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2478         }
2479
2480         /* adjust extra register config */
2481         switch (reg1->idx % 6) {
2482         case 2:
2483                 /* shift bits 8~15 down to bits 0~7 */
2484                 reg1->config >>= 8;
2485                 break;
2486         case 3:
2487                 /* shift bits 0~7 up to bits 8~15 */
2488                 reg1->config <<= 8;
2489                 break;
2490         }
2491 }
2492
2493 /*
2494  * Each rbox has 4 event sets, which monitor PQI ports 0~3 or 4~7.
2495  * An event set consists of 6 events; the 3rd and 4th events in
2496  * an event set use the same extra register, so an event set uses
2497  * 5 extra registers.
2498  */
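
/*
 * Worked example (illustrative): with reg1->idx = 9, i.e. the 4th
 * event slot of the 2nd event set, idx = 9 % 6 = 3, er_idx becomes
 * 3 - 1 = 2 because slots 2 and 3 share a register, and then
 * er_idx += (9 / 6) * 5 = 5, giving shared register 7.  Four sets of
 * five registers is also where the rbox's num_shared_regs = 20 below
 * comes from.
 */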
2499 static struct event_constraint *
2500 nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2501 {
2502         struct hw_perf_event *hwc = &event->hw;
2503         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2504         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2505         struct intel_uncore_extra_reg *er;
2506         unsigned long flags;
2507         int idx, er_idx;
2508         u64 config1;
2509         bool ok = false;
2510
2511         if (!uncore_box_is_fake(box) && reg1->alloc)
2512                 return NULL;
2513
2514         idx = reg1->idx % 6;
2515         config1 = reg1->config;
2516 again:
2517         er_idx = idx;
2518         /* the 3rd and 4th events use the same extra register */
2519         if (er_idx > 2)
2520                 er_idx--;
2521         er_idx += (reg1->idx / 6) * 5;
2522
2523         er = &box->shared_regs[er_idx];
2524         raw_spin_lock_irqsave(&er->lock, flags);
2525         if (idx < 2) {
2526                 if (!atomic_read(&er->ref) || er->config == reg1->config) {
2527                         atomic_inc(&er->ref);
2528                         er->config = reg1->config;
2529                         ok = true;
2530                 }
2531         } else if (idx == 2 || idx == 3) {
2532                 /*
2533                  * these two events use different fields in an extra register,
2534                  * bits 0~7 and bits 8~15 respectively.
2535                  */
2536                 u64 mask = 0xff << ((idx - 2) * 8);
2537                 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
2538                                 !((er->config ^ config1) & mask)) {
2539                         atomic_add(1 << ((idx - 2) * 8), &er->ref);
2540                         er->config &= ~mask;
2541                         er->config |= config1 & mask;
2542                         ok = true;
2543                 }
2544         } else {
2545                 if (!atomic_read(&er->ref) ||
2546                                 (er->config == (hwc->config >> 32) &&
2547                                  er->config1 == reg1->config &&
2548                                  er->config2 == reg2->config)) {
2549                         atomic_inc(&er->ref);
2550                         er->config = (hwc->config >> 32);
2551                         er->config1 = reg1->config;
2552                         er->config2 = reg2->config;
2553                         ok = true;
2554                 }
2555         }
2556         raw_spin_unlock_irqrestore(&er->lock, flags);
2557
2558         if (!ok) {
2559                 /*
2560                  * The Rbox events are always in pairs. The paired
2561                  * events are functionally identical, but use different
2562                  * extra registers. If we failed to take an extra
2563                  * register, try the alternative.
2564                  */
2565                 if (idx % 2)
2566                         idx--;
2567                 else
2568                         idx++;
2569                 if (idx != reg1->idx % 6) {
2570                         if (idx == 2)
2571                                 config1 >>= 8;
2572                         else if (idx == 3)
2573                                 config1 <<= 8;
2574                         goto again;
2575                 }
2576         } else {
2577                 if (!uncore_box_is_fake(box)) {
2578                         if (idx != reg1->idx % 6)
2579                                 nhmex_rbox_alter_er(box, event);
2580                         reg1->alloc = 1;
2581                 }
2582                 return NULL;
2583         }
2584         return &constraint_empty;
2585 }
2586
2587 static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2588 {
2589         struct intel_uncore_extra_reg *er;
2590         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2591         int idx, er_idx;
2592
2593         if (uncore_box_is_fake(box) || !reg1->alloc)
2594                 return;
2595
2596         idx = reg1->idx % 6;
2597         er_idx = idx;
2598         if (er_idx > 2)
2599                 er_idx--;
2600         er_idx += (reg1->idx / 6) * 5;
2601
2602         er = &box->shared_regs[er_idx];
2603         if (idx == 2 || idx == 3)
2604                 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
2605         else
2606                 atomic_dec(&er->ref);
2607
2608         reg1->alloc = 0;
2609 }
2610
2611 static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2612 {
2613         struct hw_perf_event *hwc = &event->hw;
2614         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2615         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2616         int idx;
2617
2618         idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
2619                 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2620         if (idx >= 0x18)
2621                 return -EINVAL;
2622
2623         reg1->idx = idx;
2624         reg1->config = event->attr.config1;
2625
2626         switch (idx % 6) {
2627         case 4:
2628         case 5:
2629                 hwc->config |= event->attr.config & (~0ULL << 32);
2630                 reg2->config = event->attr.config2;
2631                 break;
2632         }
2633         return 0;
2634 }
2635
2636 static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2637 {
2638         struct hw_perf_event *hwc = &event->hw;
2639         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2640         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2641         int idx, port;
2642
2643         idx = reg1->idx;
2644         port = idx / 6 + box->pmu->pmu_idx * 4;
2645
2646         switch (idx % 6) {
2647         case 0:
2648                 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
2649                 break;
2650         case 1:
2651                 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
2652                 break;
2653         case 2:
2654         case 3:
2655                 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
2656                         uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
2657                 break;
2658         case 4:
2659                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
2660                         hwc->config >> 32);
2661                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
2662                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
2663                 break;
2664         case 5:
2665                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
2666                         hwc->config >> 32);
2667                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
2668                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
2669                 break;
2670         }
2671
2672         wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
2673                 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
2674 }
2675
2676 DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
2677 DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
2678 DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
2679 DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
2680 DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
2681
2682 static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
2683         &format_attr_event5.attr,
2684         &format_attr_xbr_mm_cfg.attr,
2685         &format_attr_xbr_match.attr,
2686         &format_attr_xbr_mask.attr,
2687         &format_attr_qlx_cfg.attr,
2688         &format_attr_iperf_cfg.attr,
2689         NULL,
2690 };
2691
2692 static struct attribute_group nhmex_uncore_rbox_format_group = {
2693         .name = "format",
2694         .attrs = nhmex_uncore_rbox_formats_attr,
2695 };
2696
2697 static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
2698         INTEL_UNCORE_EVENT_DESC(qpi0_flit_send,         "event=0x0,iperf_cfg=0x80000000"),
2699         INTEL_UNCORE_EVENT_DESC(qpi1_filt_send,         "event=0x6,iperf_cfg=0x80000000"),
2700         INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt,         "event=0x0,iperf_cfg=0x40000000"),
2701         INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt,         "event=0x6,iperf_cfg=0x40000000"),
2702         INTEL_UNCORE_EVENT_DESC(qpi0_date_response,     "event=0x0,iperf_cfg=0xc4"),
2703         INTEL_UNCORE_EVENT_DESC(qpi1_date_response,     "event=0x6,iperf_cfg=0xc4"),
2704         { /* end: all zeroes */ },
2705 };
2706
2707 static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
2708         NHMEX_UNCORE_OPS_COMMON_INIT(),
2709         .enable_event           = nhmex_rbox_msr_enable_event,
2710         .hw_config              = nhmex_rbox_hw_config,
2711         .get_constraint         = nhmex_rbox_get_constraint,
2712         .put_constraint         = nhmex_rbox_put_constraint,
2713 };
2714
2715 static struct intel_uncore_type nhmex_uncore_rbox = {
2716         .name                   = "rbox",
2717         .num_counters           = 8,
2718         .num_boxes              = 2,
2719         .perf_ctr_bits          = 48,
2720         .event_ctl              = NHMEX_R_MSR_PMON_CTL0,
2721         .perf_ctr               = NHMEX_R_MSR_PMON_CNT0,
2722         .event_mask             = NHMEX_R_PMON_RAW_EVENT_MASK,
2723         .box_ctl                = NHMEX_R_MSR_GLOBAL_CTL,
2724         .msr_offset             = NHMEX_R_MSR_OFFSET,
2725         .pair_ctr_ctl           = 1,
2726         .num_shared_regs        = 20,
2727         .event_descs            = nhmex_uncore_rbox_events,
2728         .ops                    = &nhmex_uncore_rbox_ops,
2729         .format_group           = &nhmex_uncore_rbox_format_group
2730 };
2731
2732 static struct intel_uncore_type *nhmex_msr_uncores[] = {
2733         &nhmex_uncore_ubox,
2734         &nhmex_uncore_cbox,
2735         &nhmex_uncore_bbox,
2736         &nhmex_uncore_sbox,
2737         &nhmex_uncore_mbox,
2738         &nhmex_uncore_rbox,
2739         &nhmex_uncore_wbox,
2740         NULL,
2741 };
2742 /* end of Nehalem-EX uncore support */
2743
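/*
 * Bind an event to counter @idx of @box: record the allocation tag and
 * point config_base/event_base at either the fixed-counter MSRs or the
 * general-purpose counter selected by @idx.
 */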
2744 static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
2745 {
2746         struct hw_perf_event *hwc = &event->hw;
2747
2748         hwc->idx = idx;
2749         hwc->last_tag = ++box->tags[idx];
2750
2751         if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
2752                 hwc->event_base = uncore_fixed_ctr(box);
2753                 hwc->config_base = uncore_fixed_ctl(box);
2754                 return;
2755         }
2756
2757         hwc->config_base = uncore_event_ctl(box, hwc->idx);
2758         hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
2759 }
2760
2761 static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
2762 {
2763         u64 prev_count, new_count, delta;
2764         int shift;
2765
2766         if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
2767                 shift = 64 - uncore_fixed_ctr_bits(box);
2768         else
2769                 shift = 64 - uncore_perf_ctr_bits(box);
2770
2771         /* the hrtimer might modify the previous event value */
2772 again:
2773         prev_count = local64_read(&event->hw.prev_count);
2774         new_count = uncore_read_counter(box, event);
2775         if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
2776                 goto again;
2777
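        /*
         * The counters are narrower than 64 bits: shift both values up so
         * only the counter-width bits matter, then shift the (unsigned)
         * difference back down.  This yields the correct delta even when
         * the counter wraps around.
         */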
2778         delta = (new_count << shift) - (prev_count << shift);
2779         delta >>= shift;
2780
2781         local64_add(delta, &event->count);
2782 }
2783
2784 /*
2785  * The overflow interrupt is unavailable for SandyBridge-EP and broken
2786  * for SandyBridge, so we use an hrtimer to poll the counters
2787  * periodically and avoid missing overflows.
2788  */
2789 static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
2790 {
2791         struct intel_uncore_box *box;
2792         unsigned long flags;
2793         int bit;
2794
2795         box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
2796         if (!box->n_active || box->cpu != smp_processor_id())
2797                 return HRTIMER_NORESTART;
2798         /*
2799          * disable local interrupts to prevent uncore_pmu_event_start/stop
2800          * from interrupting the update process
2801          */
2802         local_irq_save(flags);
2803
2804         for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
2805                 uncore_perf_event_update(box, box->events[bit]);
2806
2807         local_irq_restore(flags);
2808
2809         hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
2810         return HRTIMER_RESTART;
2811 }
2812
2813 static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
2814 {
2815         __hrtimer_start_range_ns(&box->hrtimer,
2816                         ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
2817                         HRTIMER_MODE_REL_PINNED, 0);
2818 }
2819
2820 static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
2821 {
2822         hrtimer_cancel(&box->hrtimer);
2823 }
2824
2825 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
2826 {
2827         hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2828         box->hrtimer.function = uncore_pmu_hrtimer;
2829 }
2830
2831 static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
2832 {
2833         struct intel_uncore_box *box;
2834         int i, size;
2835
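        /*
         * The shared registers live directly behind the box structure,
         * so a single node-local allocation covers both.
         */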
2836         size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
2837
2838         box = kzalloc_node(size, GFP_KERNEL, node);
2839         if (!box)
2840                 return NULL;
2841
2842         for (i = 0; i < type->num_shared_regs; i++)
2843                 raw_spin_lock_init(&box->shared_regs[i].lock);
2844
2845         uncore_pmu_init_hrtimer(box);
2846         atomic_set(&box->refcnt, 1);
2847         box->cpu = -1;
2848         box->phys_id = -1;
2849
2850         return box;
2851 }
2852
2853 static struct intel_uncore_box *
2854 uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
2855 {
2856         struct intel_uncore_box *box;
2857
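        /*
         * Fast path: the per-cpu slot already caches this cpu's box.
         * Otherwise walk the pmu's box list under uncore_box_lock and
         * adopt the box that belongs to this cpu's physical package.
         */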
2858         box = *per_cpu_ptr(pmu->box, cpu);
2859         if (box)
2860                 return box;
2861
2862         raw_spin_lock(&uncore_box_lock);
2863         list_for_each_entry(box, &pmu->box_list, list) {
2864                 if (box->phys_id == topology_physical_package_id(cpu)) {
2865                         atomic_inc(&box->refcnt);
2866                         *per_cpu_ptr(pmu->box, cpu) = box;
2867                         break;
2868                 }
2869         }
2870         raw_spin_unlock(&uncore_box_lock);
2871
2872         return *per_cpu_ptr(pmu->box, cpu);
2873 }
2874
2875 static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
2876 {
2877         return container_of(event->pmu, struct intel_uncore_pmu, pmu);
2878 }
2879
2880 static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
2881 {
2882         /*
2883          * perf core schedules events on the basis of cpu; uncore events are
2884          * collected by one of the cpus inside a physical package.
2885          */
2886         return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
2887 }
2888
2889 static int
2890 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
2891 {
2892         struct perf_event *event;
2893         int n, max_count;
2894
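        /* the fixed counter, when present, adds one extra schedulable slot */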
2895         max_count = box->pmu->type->num_counters;
2896         if (box->pmu->type->fixed_ctl)
2897                 max_count++;
2898
2899         if (box->n_events >= max_count)
2900                 return -EINVAL;
2901
2902         n = box->n_events;
2903         box->event_list[n] = leader;
2904         n++;
2905         if (!dogrp)
2906                 return n;
2907
2908         list_for_each_entry(event, &leader->sibling_list, group_entry) {
2909                 if (event->state <= PERF_EVENT_STATE_OFF)
2910                         continue;
2911
2912                 if (n >= max_count)
2913                         return -EINVAL;
2914
2915                 box->event_list[n] = event;
2916                 n++;
2917         }
2918         return n;
2919 }
2920
2921 static struct event_constraint *
2922 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
2923 {
2924         struct intel_uncore_type *type = box->pmu->type;
2925         struct event_constraint *c;
2926
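        /*
         * Constraint lookup order: the type-specific ->get_constraint()
         * callback, then the fixed counter, then the static constraint
         * table, falling back to type->unconstrainted.
         */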
2927         if (type->ops->get_constraint) {
2928                 c = type->ops->get_constraint(box, event);
2929                 if (c)
2930                         return c;
2931         }
2932
2933         if (event->attr.config == UNCORE_FIXED_EVENT)
2934                 return &constraint_fixed;
2935
2936         if (type->constraints) {
2937                 for_each_event_constraint(c, type->constraints) {
2938                         if ((event->hw.config & c->cmask) == c->code)
2939                                 return c;
2940                 }
2941         }
2942
2943         return &type->unconstrainted;
2944 }
2945
2946 static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
2947 {
2948         if (box->pmu->type->ops->put_constraint)
2949                 box->pmu->type->ops->put_constraint(box, event);
2950 }
2951
2952 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
2953 {
2954         unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
2955         struct event_constraint *c;
2956         int i, wmin, wmax, ret = 0;
2957         struct hw_perf_event *hwc;
2958
2959         bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
2960
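        /*
         * First pass: resolve each event's constraint and track the
         * minimum/maximum constraint weights for perf_assign_events().
         */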
2961         for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
2962                 hwc = &box->event_list[i]->hw;
2963                 c = uncore_get_event_constraint(box, box->event_list[i]);
2964                 hwc->constraint = c;
2965                 wmin = min(wmin, c->weight);
2966                 wmax = max(wmax, c->weight);
2967         }
2968
2969         /* fastpath, try to reuse previous register */
2970         for (i = 0; i < n; i++) {
2971                 hwc = &box->event_list[i]->hw;
2972                 c = hwc->constraint;
2973
2974                 /* never assigned */
2975                 if (hwc->idx == -1)
2976                         break;
2977
2978                 /* constraint still honored */
2979                 if (!test_bit(hwc->idx, c->idxmsk))
2980                         break;
2981
2982                 /* not already used */
2983                 if (test_bit(hwc->idx, used_mask))
2984                         break;
2985
2986                 __set_bit(hwc->idx, used_mask);
2987                 if (assign)
2988                         assign[i] = hwc->idx;
2989         }
2990         /* slow path */
2991         if (i != n)
2992                 ret = perf_assign_events(box->event_list, n,
2993                                          wmin, wmax, assign);
2994
2995         if (!assign || ret) {
2996                 for (i = 0; i < n; i++)
2997                         uncore_put_event_constraint(box, box->event_list[i]);
2998         }
2999         return ret ? -EINVAL : 0;
3000 }
3001
3002 static void uncore_pmu_event_start(struct perf_event *event, int flags)
3003 {
3004         struct intel_uncore_box *box = uncore_event_to_box(event);
3005         int idx = event->hw.idx;
3006
3007         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
3008                 return;
3009
3010         if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
3011                 return;
3012
3013         event->hw.state = 0;
3014         box->events[idx] = event;
3015         box->n_active++;
3016         __set_bit(idx, box->active_mask);
3017
3018         local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
3019         uncore_enable_event(box, event);
3020
3021         if (box->n_active == 1) {
3022                 uncore_enable_box(box);
3023                 uncore_pmu_start_hrtimer(box);
3024         }
3025 }
3026
3027 static void uncore_pmu_event_stop(struct perf_event *event, int flags)
3028 {
3029         struct intel_uncore_box *box = uncore_event_to_box(event);
3030         struct hw_perf_event *hwc = &event->hw;
3031
3032         if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
3033                 uncore_disable_event(box, event);
3034                 box->n_active--;
3035                 box->events[hwc->idx] = NULL;
3036                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
3037                 hwc->state |= PERF_HES_STOPPED;
3038
3039                 if (box->n_active == 0) {
3040                         uncore_disable_box(box);
3041                         uncore_pmu_cancel_hrtimer(box);
3042                 }
3043         }
3044
3045         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
3046                 /*
3047                  * Drain the remaining delta count out of an event
3048                  * that we are disabling:
3049                  */
3050                 uncore_perf_event_update(box, event);
3051                 hwc->state |= PERF_HES_UPTODATE;
3052         }
3053 }
3054
3055 static int uncore_pmu_event_add(struct perf_event *event, int flags)
3056 {
3057         struct intel_uncore_box *box = uncore_event_to_box(event);
3058         struct hw_perf_event *hwc = &event->hw;
3059         int assign[UNCORE_PMC_IDX_MAX];
3060         int i, n, ret;
3061
3062         if (!box)
3063                 return -ENODEV;
3064
3065         ret = n = uncore_collect_events(box, event, false);
3066         if (ret < 0)
3067                 return ret;
3068
3069         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
3070         if (!(flags & PERF_EF_START))
3071                 hwc->state |= PERF_HES_ARCH;
3072
3073         ret = uncore_assign_events(box, assign, n);
3074         if (ret)
3075                 return ret;
3076
3077         /* save events moving to new counters */
3078         for (i = 0; i < box->n_events; i++) {
3079                 event = box->event_list[i];
3080                 hwc = &event->hw;
3081
3082                 if (hwc->idx == assign[i] &&
3083                         hwc->last_tag == box->tags[assign[i]])
3084                         continue;
3085                 /*
3086                  * Ensure we don't accidentally enable a stopped
3087                  * counter simply because we rescheduled.
3088                  */
3089                 if (hwc->state & PERF_HES_STOPPED)
3090                         hwc->state |= PERF_HES_ARCH;
3091
3092                 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3093         }
3094
3095         /* reprogram moved events into new counters */
3096         for (i = 0; i < n; i++) {
3097                 event = box->event_list[i];
3098                 hwc = &event->hw;
3099
3100                 if (hwc->idx != assign[i] ||
3101                         hwc->last_tag != box->tags[assign[i]])
3102                         uncore_assign_hw_event(box, event, assign[i]);
3103                 else if (i < box->n_events)
3104                         continue;
3105
3106                 if (hwc->state & PERF_HES_ARCH)
3107                         continue;
3108
3109                 uncore_pmu_event_start(event, 0);
3110         }
3111         box->n_events = n;
3112
3113         return 0;
3114 }
3115
3116 static void uncore_pmu_event_del(struct perf_event *event, int flags)
3117 {
3118         struct intel_uncore_box *box = uncore_event_to_box(event);
3119         int i;
3120
3121         uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3122
3123         for (i = 0; i < box->n_events; i++) {
3124                 if (event == box->event_list[i]) {
3125                         uncore_put_event_constraint(box, event);
3126
3127                         while (++i < box->n_events)
3128                                 box->event_list[i - 1] = box->event_list[i];
3129
3130                         --box->n_events;
3131                         break;
3132                 }
3133         }
3134
3135         event->hw.idx = -1;
3136         event->hw.last_tag = ~0ULL;
3137 }
3138
3139 static void uncore_pmu_event_read(struct perf_event *event)
3140 {
3141         struct intel_uncore_box *box = uncore_event_to_box(event);
3142         uncore_perf_event_update(box, event);
3143 }
3144
3145 /*
3146  * validation ensures the group can be loaded onto the
3147  * PMU if it was the only group available.
3148  */
3149 static int uncore_validate_group(struct intel_uncore_pmu *pmu,
3150                                 struct perf_event *event)
3151 {
3152         struct perf_event *leader = event->group_leader;
3153         struct intel_uncore_box *fake_box;
3154         int ret = -EINVAL, n;
3155
3156         fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
3157         if (!fake_box)
3158                 return -ENOMEM;
3159
3160         fake_box->pmu = pmu;
3161         /*
3162          * the event is not yet connected with its
3163          * siblings; therefore we must first collect
3164          * existing siblings, then add the new event
3165          * before we can simulate the scheduling
3166          */
3167         n = uncore_collect_events(fake_box, leader, true);
3168         if (n < 0)
3169                 goto out;
3170
3171         fake_box->n_events = n;
3172         n = uncore_collect_events(fake_box, event, false);
3173         if (n < 0)
3174                 goto out;
3175
3176         fake_box->n_events = n;
3177
3178         ret = uncore_assign_events(fake_box, NULL, n);
3179 out:
3180         kfree(fake_box);
3181         return ret;
3182 }
3183
3184 static int uncore_pmu_event_init(struct perf_event *event)
3185 {
3186         struct intel_uncore_pmu *pmu;
3187         struct intel_uncore_box *box;
3188         struct hw_perf_event *hwc = &event->hw;
3189         int ret;
3190
3191         if (event->attr.type != event->pmu->type)
3192                 return -ENOENT;
3193
3194         pmu = uncore_event_to_pmu(event);
3195         /* no device found for this pmu */
3196         if (pmu->func_id < 0)
3197                 return -ENOENT;
3198
3199         /*
3200          * The uncore PMU counts at all privilege levels all the time,
3201          * so it doesn't make sense to specify any exclude bits.
3202          */
3203         if (event->attr.exclude_user || event->attr.exclude_kernel ||
3204                         event->attr.exclude_hv || event->attr.exclude_idle)
3205                 return -EINVAL;
3206
3207         /* Sampling not supported yet */
3208         if (hwc->sample_period)
3209                 return -EINVAL;
3210
3211         /*
3212          * Place all uncore events for a particular physical package
3213          * onto a single cpu
3214          */
3215         if (event->cpu < 0)
3216                 return -EINVAL;
3217         box = uncore_pmu_to_box(pmu, event->cpu);
3218         if (!box || box->cpu < 0)
3219                 return -EINVAL;
3220         event->cpu = box->cpu;
3221
3222         event->hw.idx = -1;
3223         event->hw.last_tag = ~0ULL;
3224         event->hw.extra_reg.idx = EXTRA_REG_NONE;
3225         event->hw.branch_reg.idx = EXTRA_REG_NONE;
3226
3227         if (event->attr.config == UNCORE_FIXED_EVENT) {
3228                 /* no fixed counter */
3229                 if (!pmu->type->fixed_ctl)
3230                         return -EINVAL;
3231                 /*
3232                  * if there is only one fixed counter, only the first pmu
3233                  * can access the fixed counter
3234                  */
3235                 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
3236                         return -EINVAL;
3237
3238                 /* fixed counters have event field hardcoded to zero */
3239                 hwc->config = 0ULL;
3240         } else {
3241                 hwc->config = event->attr.config & pmu->type->event_mask;
3242                 if (pmu->type->ops->hw_config) {
3243                         ret = pmu->type->ops->hw_config(box, event);
3244                         if (ret)
3245                                 return ret;
3246                 }
3247         }
3248
3249         if (event->group_leader != event)
3250                 ret = uncore_validate_group(pmu, event);
3251         else
3252                 ret = 0;
3253
3254         return ret;
3255 }
3256
3257 static ssize_t uncore_get_attr_cpumask(struct device *dev,
3258                                 struct device_attribute *attr, char *buf)
3259 {
3260         int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
3261
3262         buf[n++] = '\n';
3263         buf[n] = '\0';
3264         return n;
3265 }
3266
3267 static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
3268
3269 static struct attribute *uncore_pmu_attrs[] = {
3270         &dev_attr_cpumask.attr,
3271         NULL,
3272 };
3273
3274 static struct attribute_group uncore_pmu_attr_group = {
3275         .attrs = uncore_pmu_attrs,
3276 };
3277
3278 static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
3279 {
3280         int ret;
3281
3282         pmu->pmu = (struct pmu) {
3283                 .attr_groups    = pmu->type->attr_groups,
3284                 .task_ctx_nr    = perf_invalid_context,
3285                 .event_init     = uncore_pmu_event_init,
3286                 .add            = uncore_pmu_event_add,
3287                 .del            = uncore_pmu_event_del,
3288                 .start          = uncore_pmu_event_start,
3289                 .stop           = uncore_pmu_event_stop,
3290                 .read           = uncore_pmu_event_read,
3291         };
3292
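        /*
         * Single-box types are exposed as "uncore_<type>" (or plain
         * "uncore" when the type has no name); multi-box types get a
         * per-box "uncore_<type>_<index>" name.
         */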
3293         if (pmu->type->num_boxes == 1) {
3294                 if (strlen(pmu->type->name) > 0)
3295                         sprintf(pmu->name, "uncore_%s", pmu->type->name);
3296                 else
3297                         sprintf(pmu->name, "uncore");
3298         } else {
3299                 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
3300                         pmu->pmu_idx);
3301         }
3302
3303         ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
3304         return ret;
3305 }
3306
3307 static void __init uncore_type_exit(struct intel_uncore_type *type)
3308 {
3309         int i;
3310
3311         for (i = 0; i < type->num_boxes; i++)
3312                 free_percpu(type->pmus[i].box);
3313         kfree(type->pmus);
3314         type->pmus = NULL;
3315         kfree(type->events_group);
3316         type->events_group = NULL;
3317 }
3318
3319 static void __init uncore_types_exit(struct intel_uncore_type **types)
3320 {
3321         int i;
3322         for (i = 0; types[i]; i++)
3323                 uncore_type_exit(types[i]);
3324 }
3325
3326 static int __init uncore_type_init(struct intel_uncore_type *type)
3327 {
3328         struct intel_uncore_pmu *pmus;
3329         struct attribute_group *attr_group;
3330         struct attribute **attrs;
3331         int i, j;
3332
3333         pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
3334         if (!pmus)
3335                 return -ENOMEM;
3336
3337         type->pmus = pmus;
3338
3339         type->unconstrainted = (struct event_constraint)
3340                 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
3341                                 0, type->num_counters, 0, 0);
3342
3343         for (i = 0; i < type->num_boxes; i++) {
3344                 pmus[i].func_id = -1;
3345                 pmus[i].pmu_idx = i;
3346                 pmus[i].type = type;
3347                 INIT_LIST_HEAD(&pmus[i].box_list);
3348                 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
3349                 if (!pmus[i].box)
3350                         goto fail;
3351         }
3352
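        /*
         * Build the "events" attribute group from the type's event
         * descriptions; the attribute pointer array lives in the same
         * allocation, directly behind the group structure.
         */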
3353         if (type->event_descs) {
3354                 i = 0;
3355                 while (type->event_descs[i].attr.attr.name)
3356                         i++;
3357
3358                 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
3359                                         sizeof(*attr_group), GFP_KERNEL);
3360                 if (!attr_group)
3361                         goto fail;
3362
3363                 attrs = (struct attribute **)(attr_group + 1);
3364                 attr_group->name = "events";
3365                 attr_group->attrs = attrs;
3366
3367                 for (j = 0; j < i; j++)
3368                         attrs[j] = &type->event_descs[j].attr.attr;
3369
3370                 type->events_group = attr_group;
3371         }
3372
3373         type->pmu_group = &uncore_pmu_attr_group;
3374         return 0;
3375 fail:
3376         uncore_type_exit(type);
3377         return -ENOMEM;
3378 }
3379
3380 static int __init uncore_types_init(struct intel_uncore_type **types)
3381 {
3382         int i, ret;
3383
3384         for (i = 0; types[i]; i++) {
3385                 ret = uncore_type_init(types[i]);
3386                 if (ret)
3387                         goto fail;
3388         }
3389         return 0;
3390 fail:
3391         while (--i >= 0)
3392                 uncore_type_exit(types[i]);
3393         return ret;
3394 }
3395
3396 static struct pci_driver *uncore_pci_driver;
3397 static bool pcidrv_registered;
3398
3399 /*
3400  * add a pci uncore device
3401  */
3402 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3403 {
3404         struct intel_uncore_pmu *pmu;
3405         struct intel_uncore_box *box;
3406         struct intel_uncore_type *type;
3407         int phys_id;
3408
3409         phys_id = pcibus_to_physid[pdev->bus->number];
3410         if (phys_id < 0)
3411                 return -ENODEV;
3412
3413         if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
3414                 extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
3415                 pci_set_drvdata(pdev, NULL);
3416                 return 0;
3417         }
3418
3419         type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
3420         box = uncore_alloc_box(type, NUMA_NO_NODE);
3421         if (!box)
3422                 return -ENOMEM;
3423
3424         /*
3425          * for a performance monitoring unit with multiple boxes,
3426          * each box has a different function id.
3427          */
3428         pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
3429         if (pmu->func_id < 0)
3430                 pmu->func_id = pdev->devfn;
3431         else
3432                 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
3433
3434         box->phys_id = phys_id;
3435         box->pci_dev = pdev;
3436         box->pmu = pmu;
3437         uncore_box_init(box);
3438         pci_set_drvdata(pdev, box);
3439
3440         raw_spin_lock(&uncore_box_lock);
3441         list_add_tail(&box->list, &pmu->box_list);
3442         raw_spin_unlock(&uncore_box_lock);
3443
3444         return 0;
3445 }
3446
3447 static void uncore_pci_remove(struct pci_dev *pdev)
3448 {
3449         struct intel_uncore_box *box = pci_get_drvdata(pdev);
3450         struct intel_uncore_pmu *pmu;
3451         int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];
3452
3454         if (!box) {
3455                 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
3456                         if (extra_pci_dev[phys_id][i] == pdev) {
3457                                 extra_pci_dev[phys_id][i] = NULL;
3458                                 break;
3459                         }
3460                 }
3461                 WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
3462                 return;
3463         }
3464
3465         pmu = box->pmu;
3466         if (WARN_ON_ONCE(phys_id != box->phys_id))
3467                 return;
3468
3469         pci_set_drvdata(pdev, NULL);
3470
3471         raw_spin_lock(&uncore_box_lock);
3472         list_del(&box->list);
3473         raw_spin_unlock(&uncore_box_lock);
3474
3475         for_each_possible_cpu(cpu) {
3476                 if (*per_cpu_ptr(pmu->box, cpu) == box) {
3477                         *per_cpu_ptr(pmu->box, cpu) = NULL;
3478                         atomic_dec(&box->refcnt);
3479                 }
3480         }
3481
3482         WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
3483         kfree(box);
3484 }
3485
3486 static int __init uncore_pci_init(void)
3487 {
3488         int ret;
3489
3490         switch (boot_cpu_data.x86_model) {
3491         case 45: /* Sandy Bridge-EP */
3492                 ret = snbep_pci2phy_map_init(0x3ce0);
3493                 if (ret)
3494                         return ret;
3495                 pci_uncores = snbep_pci_uncores;
3496                 uncore_pci_driver = &snbep_uncore_pci_driver;
3497                 break;
3498         case 62: /* IvyTown */
3499                 ret = snbep_pci2phy_map_init(0x0e1e);
3500                 if (ret)
3501                         return ret;
3502                 pci_uncores = ivt_pci_uncores;
3503                 uncore_pci_driver = &ivt_uncore_pci_driver;
3504                 break;
3505         default:
3506                 return 0;
3507         }
3508
3509         ret = uncore_types_init(pci_uncores);
3510         if (ret)
3511                 return ret;
3512
3513         uncore_pci_driver->probe = uncore_pci_probe;
3514         uncore_pci_driver->remove = uncore_pci_remove;
3515
3516         ret = pci_register_driver(uncore_pci_driver);
3517         if (ret == 0)
3518                 pcidrv_registered = true;
3519         else
3520                 uncore_types_exit(pci_uncores);
3521
3522         return ret;
3523 }
3524
3525 static void __init uncore_pci_exit(void)
3526 {
3527         if (pcidrv_registered) {
3528                 pcidrv_registered = false;
3529                 pci_unregister_driver(uncore_pci_driver);
3530                 uncore_types_exit(pci_uncores);
3531         }
3532 }
3533
3534 /* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
3535 static LIST_HEAD(boxes_to_free);
3536
3537 static void uncore_kfree_boxes(void)
3538 {
3539         struct intel_uncore_box *box;
3540
3541         while (!list_empty(&boxes_to_free)) {
3542                 box = list_entry(boxes_to_free.next,
3543                                  struct intel_uncore_box, list);
3544                 list_del(&box->list);
3545                 kfree(box);
3546         }
3547 }
3548
3549 static void uncore_cpu_dying(int cpu)
3550 {
3551         struct intel_uncore_type *type;
3552         struct intel_uncore_pmu *pmu;
3553         struct intel_uncore_box *box;
3554         int i, j;
3555
3556         for (i = 0; msr_uncores[i]; i++) {
3557                 type = msr_uncores[i];
3558                 for (j = 0; j < type->num_boxes; j++) {
3559                         pmu = &type->pmus[j];
3560                         box = *per_cpu_ptr(pmu->box, cpu);
3561                         *per_cpu_ptr(pmu->box, cpu) = NULL;
3562                         if (box && atomic_dec_and_test(&box->refcnt))
3563                                 list_add(&box->list, &boxes_to_free);
3564                 }
3565         }
3566 }
3567
3568 static int uncore_cpu_starting(int cpu)
3569 {
3570         struct intel_uncore_type *type;
3571         struct intel_uncore_pmu *pmu;
3572         struct intel_uncore_box *box, *exist;
3573         int i, j, k, phys_id;
3574
3575         phys_id = topology_physical_package_id(cpu);
3576
3577         for (i = 0; msr_uncores[i]; i++) {
3578                 type = msr_uncores[i];
3579                 for (j = 0; j < type->num_boxes; j++) {
3580                         pmu = &type->pmus[j];
3581                         box = *per_cpu_ptr(pmu->box, cpu);
3582                         /* called by uncore_cpu_init? */
3583                         if (box && box->phys_id >= 0) {
3584                                 uncore_box_init(box);
3585                                 continue;
3586                         }
3587
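                        /*
                         * Another cpu in the same package may already own
                         * a box; if so, share it and queue the box that
                         * was preallocated for this cpu for freeing.
                         */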
3588                         for_each_online_cpu(k) {
3589                                 exist = *per_cpu_ptr(pmu->box, k);
3590                                 if (exist && exist->phys_id == phys_id) {
3591                                         atomic_inc(&exist->refcnt);
3592                                         *per_cpu_ptr(pmu->box, cpu) = exist;
3593                                         if (box) {
3594                                                 list_add(&box->list,
3595                                                          &boxes_to_free);
3596                                                 box = NULL;
3597                                         }
3598                                         break;
3599                                 }
3600                         }
3601
3602                         if (box) {
3603                                 box->phys_id = phys_id;
3604                                 uncore_box_init(box);
3605                         }
3606                 }
3607         }
3608         return 0;
3609 }
3610
3611 static int uncore_cpu_prepare(int cpu, int phys_id)
3612 {
3613         struct intel_uncore_type *type;
3614         struct intel_uncore_pmu *pmu;
3615         struct intel_uncore_box *box;
3616         int i, j;
3617
3618         for (i = 0; msr_uncores[i]; i++) {
3619                 type = msr_uncores[i];
3620                 for (j = 0; j < type->num_boxes; j++) {
3621                         pmu = &type->pmus[j];
3622                         if (pmu->func_id < 0)
3623                                 pmu->func_id = j;
3624
3625                         box = uncore_alloc_box(type, cpu_to_node(cpu));
3626                         if (!box)
3627                                 return -ENOMEM;
3628
3629                         box->pmu = pmu;
3630                         box->phys_id = phys_id;
3631                         *per_cpu_ptr(pmu->box, cpu) = box;
3632                 }
3633         }
3634         return 0;
3635 }
3636
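/*
 * Move the uncore event-collecting duty for each box from old_cpu to
 * new_cpu (a negative cpu means "none"), migrating any active perf
 * context along with it.
 */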
3637 static void
3638 uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
3639 {
3640         struct intel_uncore_type *type;
3641         struct intel_uncore_pmu *pmu;
3642         struct intel_uncore_box *box;
3643         int i, j;
3644
3645         for (i = 0; uncores[i]; i++) {
3646                 type = uncores[i];
3647                 for (j = 0; j < type->num_boxes; j++) {
3648                         pmu = &type->pmus[j];
3649                         if (old_cpu < 0)
3650                                 box = uncore_pmu_to_box(pmu, new_cpu);
3651                         else
3652                                 box = uncore_pmu_to_box(pmu, old_cpu);
3653                         if (!box)
3654                                 continue;
3655
3656                         if (old_cpu < 0) {
3657                                 WARN_ON_ONCE(box->cpu != -1);
3658                                 box->cpu = new_cpu;
3659                                 continue;
3660                         }
3661
3662                         WARN_ON_ONCE(box->cpu != old_cpu);
3663                         if (new_cpu >= 0) {
3664                                 uncore_pmu_cancel_hrtimer(box);
3665                                 perf_pmu_migrate_context(&pmu->pmu,
3666                                                 old_cpu, new_cpu);
3667                                 box->cpu = new_cpu;
3668                         } else {
3669                                 box->cpu = -1;
3670                         }
3671                 }
3672         }
3673 }
3674
3675 static void uncore_event_exit_cpu(int cpu)
3676 {
3677         int i, phys_id, target;
3678
3679         /* if exiting cpu is used for collecting uncore events */
3680         if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
3681                 return;
3682
3683         /* find a new cpu to collect uncore events */
3684         phys_id = topology_physical_package_id(cpu);
3685         target = -1;
3686         for_each_online_cpu(i) {
3687                 if (i == cpu)
3688                         continue;
3689                 if (phys_id == topology_physical_package_id(i)) {
3690                         target = i;
3691                         break;
3692                 }
3693         }
3694
3695         /* migrate uncore events to the new cpu */
3696         if (target >= 0)
3697                 cpumask_set_cpu(target, &uncore_cpu_mask);
3698
3699         uncore_change_context(msr_uncores, cpu, target);
3700         uncore_change_context(pci_uncores, cpu, target);
3701 }
3702
3703 static void uncore_event_init_cpu(int cpu)
3704 {
3705         int i, phys_id;
3706
3707         phys_id = topology_physical_package_id(cpu);
3708         for_each_cpu(i, &uncore_cpu_mask) {
3709                 if (phys_id == topology_physical_package_id(i))
3710                         return;
3711         }
3712
3713         cpumask_set_cpu(cpu, &uncore_cpu_mask);
3714
3715         uncore_change_context(msr_uncores, -1, cpu);
3716         uncore_change_context(pci_uncores, -1, cpu);
3717 }
3718
3719 static int uncore_cpu_notifier(struct notifier_block *self,
3720                                unsigned long action, void *hcpu)
3721 {
3722         unsigned int cpu = (long)hcpu;
3723
3724         /* allocate/free data structure for uncore box */
3725         switch (action & ~CPU_TASKS_FROZEN) {
3726         case CPU_UP_PREPARE:
3727                 uncore_cpu_prepare(cpu, -1);
3728                 break;
3729         case CPU_STARTING:
3730                 uncore_cpu_starting(cpu);
3731                 break;
3732         case CPU_UP_CANCELED:
3733         case CPU_DYING:
3734                 uncore_cpu_dying(cpu);
3735                 break;
3736         case CPU_ONLINE:
3737         case CPU_DEAD:
3738                 uncore_kfree_boxes();
3739                 break;
3740         default:
3741                 break;
3742         }
3743
3744         /* select the cpu that collects uncore events */
3745         switch (action & ~CPU_TASKS_FROZEN) {
3746         case CPU_DOWN_FAILED:
3747         case CPU_STARTING:
3748                 uncore_event_init_cpu(cpu);
3749                 break;
3750         case CPU_DOWN_PREPARE:
3751                 uncore_event_exit_cpu(cpu);
3752                 break;
3753         default:
3754                 break;
3755         }
3756
3757         return NOTIFY_OK;
3758 }
3759
3760 static struct notifier_block uncore_cpu_nb = {
3761         .notifier_call  = uncore_cpu_notifier,
3762         /*
3763          * to migrate uncore events, our notifier should be executed
3764          * before perf core's notifier.
3765          */
3766         .priority       = CPU_PRI_PERF + 1,
3767 };
3768
3769 static void __init uncore_cpu_setup(void *dummy)
3770 {
3771         uncore_cpu_starting(smp_processor_id());
3772 }
3773
3774 static int __init uncore_cpu_init(void)
3775 {
3776         int ret, cpu, max_cores;
3777
3778         max_cores = boot_cpu_data.x86_max_cores;
3779         switch (boot_cpu_data.x86_model) {
3780         case 26: /* Nehalem */
3781         case 30:
3782         case 37: /* Westmere */
3783         case 44:
3784                 msr_uncores = nhm_msr_uncores;
3785                 break;
3786         case 42: /* Sandy Bridge */
3787         case 58: /* Ivy Bridge */
3788                 if (snb_uncore_cbox.num_boxes > max_cores)
3789                         snb_uncore_cbox.num_boxes = max_cores;
3790                 msr_uncores = snb_msr_uncores;
3791                 break;
3792         case 45: /* Sandy Bridge-EP */
3793                 if (snbep_uncore_cbox.num_boxes > max_cores)
3794                         snbep_uncore_cbox.num_boxes = max_cores;
3795                 msr_uncores = snbep_msr_uncores;
3796                 break;
3797         case 46: /* Nehalem-EX */
3798                 uncore_nhmex = true;
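                /* fall through */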
3799         case 47: /* Westmere-EX aka. Xeon E7 */
3800                 if (!uncore_nhmex)
3801                         nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
3802                 if (nhmex_uncore_cbox.num_boxes > max_cores)
3803                         nhmex_uncore_cbox.num_boxes = max_cores;
3804                 msr_uncores = nhmex_msr_uncores;
3805                 break;
3806         case 62: /* IvyTown */
3807                 if (ivt_uncore_cbox.num_boxes > max_cores)
3808                         ivt_uncore_cbox.num_boxes = max_cores;
3809                 msr_uncores = ivt_msr_uncores;
3810                 break;
3811
3812         default:
3813                 return 0;
3814         }
3815
3816         ret = uncore_types_init(msr_uncores);
3817         if (ret)
3818                 return ret;
3819
3820         get_online_cpus();
3821
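        /*
         * Prepare boxes for the first online cpu of each physical package
         * and make that cpu the package's uncore event collector.
         */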
3822         for_each_online_cpu(cpu) {
3823                 int i, phys_id = topology_physical_package_id(cpu);
3824
3825                 for_each_cpu(i, &uncore_cpu_mask) {
3826                         if (phys_id == topology_physical_package_id(i)) {
3827                                 phys_id = -1;
3828                                 break;
3829                         }
3830                 }
3831                 if (phys_id < 0)
3832                         continue;
3833
3834                 uncore_cpu_prepare(cpu, phys_id);
3835                 uncore_event_init_cpu(cpu);
3836         }
3837         on_each_cpu(uncore_cpu_setup, NULL, 1);
3838
3839         register_cpu_notifier(&uncore_cpu_nb);
3840
3841         put_online_cpus();
3842
3843         return 0;
3844 }
3845
3846 static int __init uncore_pmus_register(void)
3847 {
3848         struct intel_uncore_pmu *pmu;
3849         struct intel_uncore_type *type;
3850         int i, j;
3851
3852         for (i = 0; msr_uncores[i]; i++) {
3853                 type = msr_uncores[i];
3854                 for (j = 0; j < type->num_boxes; j++) {
3855                         pmu = &type->pmus[j];
3856                         uncore_pmu_register(pmu);
3857                 }
3858         }
3859
3860         for (i = 0; pci_uncores[i]; i++) {
3861                 type = pci_uncores[i];
3862                 for (j = 0; j < type->num_boxes; j++) {
3863                         pmu = &type->pmus[j];
3864                         uncore_pmu_register(pmu);
3865                 }
3866         }
3867
3868         return 0;
3869 }
3870
3871 static int __init intel_uncore_init(void)
3872 {
3873         int ret;
3874
3875         if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
3876                 return -ENODEV;
3877
3878         if (cpu_has_hypervisor)
3879                 return -ENODEV;
3880
3881         ret = uncore_pci_init();
3882         if (ret)
3883                 goto fail;
3884         ret = uncore_cpu_init();
3885         if (ret) {
3886                 uncore_pci_exit();
3887                 goto fail;
3888         }
3889
3890         uncore_pmus_register();
3891         return 0;
3892 fail:
3893         return ret;
3894 }
3895 device_initcall(intel_uncore_init);