/* arch/x86/kernel/cpu/perf_event_intel_uncore.c */
#include "perf_event_intel_uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
static struct intel_uncore_type **msr_uncores = empty_uncore;
static struct intel_uncore_type **pci_uncores = empty_uncore;
/* pci bus to socket mapping */
static int pcibus_to_physid[256] = { [0 ... 255] = -1, };

static DEFINE_RAW_SPINLOCK(uncore_box_lock);

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint constraint_fixed =
        EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
static struct event_constraint constraint_empty =
        EVENT_CONSTRAINT(0, 0, 0);

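/*
 * The format attributes below are exported via sysfs under
 * /sys/bus/event_source/devices/<pmu>/format/ and tell the perf tool
 * which config/config1 bits each event-string field occupies, e.g.
 * (illustrative):
 *   perf stat -a -e 'uncore_cbox_0/event=0x34,umask=0x01/' sleep 1
 */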
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");

static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        u64 count;

        rdmsrl(event->hw.event_base, count);

        return count;
}

/*
 * generic get constraint function for shared match/mask registers.
 */
static struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
        unsigned long flags;
        bool ok = false;

        /*
         * reg->alloc can be set due to existing state, so for fake box we
         * need to ignore this, otherwise we might fail to allocate proper
         * fake state for this extra reg constraint.
         */
        if (reg1->idx == EXTRA_REG_NONE ||
            (!uncore_box_is_fake(box) && reg1->alloc))
                return NULL;

        er = &box->shared_regs[reg1->idx];
        raw_spin_lock_irqsave(&er->lock, flags);
        if (!atomic_read(&er->ref) ||
            (er->config1 == reg1->config && er->config2 == reg2->config)) {
                atomic_inc(&er->ref);
                er->config1 = reg1->config;
                er->config2 = reg2->config;
                ok = true;
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);

        if (ok) {
                if (!uncore_box_is_fake(box))
                        reg1->alloc = 1;
                return NULL;
        }

        return &constraint_empty;
}

static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

        /*
         * Only put constraint if extra reg was actually allocated. Also takes
         * care of events which do not use an extra shared reg.
         *
         * Also, if this is a fake box we shouldn't touch any event state
         * (reg->alloc) and we don't care about leaving inconsistent box
         * state either since it will be thrown out.
         */
        if (uncore_box_is_fake(box) || !reg1->alloc)
                return;

        er = &box->shared_regs[reg1->idx];
        atomic_dec(&er->ref);
        reg1->alloc = 0;
}

/* Sandy Bridge-EP uncore support */
static struct intel_uncore_type snbep_uncore_cbox;
static struct intel_uncore_type snbep_uncore_pcu;

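/*
 * All SNB-EP PCI boxes share one freeze protocol: setting the FRZ bit in
 * the box control register stops every counter in the box, and clearing
 * it lets them run again.
 */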
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
        u32 config;

        pci_read_config_dword(pdev, box_ctl, &config);
        config |= SNBEP_PMON_BOX_CTL_FRZ;
        pci_write_config_dword(pdev, box_ctl, config);
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
        u32 config;

        pci_read_config_dword(pdev, box_ctl, &config);
        config &= ~SNBEP_PMON_BOX_CTL_FRZ;
        pci_write_config_dword(pdev, box_ctl, config);
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;

        pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;

        pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

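/*
 * PCI config space only allows 32-bit accesses, so the 48-bit counter is
 * read as two dwords.  The read is not atomic, but the upper half changes
 * slowly enough that perf's delta accounting copes with the race.
 */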
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
        u64 count;

        pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
        pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

        return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;

        pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
        u64 config;
        unsigned msr;

        msr = uncore_msr_box_ctl(box);
        if (msr) {
                rdmsrl(msr, config);
                config |= SNBEP_PMON_BOX_CTL_FRZ;
                wrmsrl(msr, config);
        }
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        u64 config;
        unsigned msr;

        msr = uncore_msr_box_ctl(box);
        if (msr) {
                rdmsrl(msr, config);
                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
                wrmsrl(msr, config);
        }
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

        if (reg1->idx != EXTRA_REG_NONE)
                wrmsrl(reg1->reg, reg1->config);

        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
                                        struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
        unsigned msr = uncore_msr_box_ctl(box);

        if (msr)
                wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

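/*
 * Route attr.config1 into the per-box filter register for the box types
 * that have one (Cbox and PCU).  Userspace reaches this through the
 * filter_* format attributes, e.g. (illustrative):
 *   perf stat -a -e 'uncore_cbox_0/event=0x34,umask=0x01,filter_tid=0x1f/'
 */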
static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

        if (box->pmu->type == &snbep_uncore_cbox) {
                reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
                        SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
                reg1->config = event->attr.config1 &
                        SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
        } else {
                if (box->pmu->type == &snbep_uncore_pcu) {
                        reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
                        reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
                } else {
                        return 0;
                }
        }
        reg1->idx = 0;

        return 0;
}

static struct attribute *snbep_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid.attr,
        &format_attr_filter_nid.attr,
        &format_attr_filter_state.attr,
        &format_attr_filter_opc.attr,
        NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_occ_sel.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge.attr,
        &format_attr_filter_band0.attr,
        &format_attr_filter_band1.attr,
        &format_attr_filter_band2.attr,
        &format_attr_filter_band3.attr,
        NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
        { /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
        INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
        INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x02,umask=0x08"),
        INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x03,umask=0x04"),
        { /* end: all zeroes */ },
};

static struct attribute_group snbep_uncore_format_group = {
        .name = "format",
        .attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
        .name = "format",
        .attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
        .name = "format",
        .attrs = snbep_uncore_qpi_formats_attr,
};

static struct intel_uncore_ops snbep_uncore_msr_ops = {
        .init_box       = snbep_uncore_msr_init_box,
        .disable_box    = snbep_uncore_msr_disable_box,
        .enable_box     = snbep_uncore_msr_enable_box,
        .disable_event  = snbep_uncore_msr_disable_event,
        .enable_event   = snbep_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
        .get_constraint = uncore_get_constraint,
        .put_constraint = uncore_put_constraint,
        .hw_config      = snbep_uncore_hw_config,
};

static struct intel_uncore_ops snbep_uncore_pci_ops = {
        .init_box       = snbep_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = snbep_uncore_pci_disable_event,
        .enable_event   = snbep_uncore_pci_enable_event,
        .read_counter   = snbep_uncore_pci_read_counter,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
        EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
        EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
        .name           = "ubox",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
        .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
        .event_mask     = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops            = &snbep_uncore_msr_ops,
        .format_group   = &snbep_uncore_ubox_format_group,
};

static struct intel_uncore_type snbep_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 8,
        .perf_ctr_bits          = 44,
        .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = SNBEP_CBO_MSR_OFFSET,
        .num_shared_regs        = 1,
        .constraints            = snbep_uncore_cbox_constraints,
        .ops                    = &snbep_uncore_msr_ops,
        .format_group           = &snbep_uncore_cbox_format_group,
};

static struct intel_uncore_type snbep_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &snbep_uncore_msr_ops,
        .format_group           = &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
        &snbep_uncore_ubox,
        &snbep_uncore_cbox,
        &snbep_uncore_pcu,
        NULL,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
        .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,            \
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
        .ops            = &snbep_uncore_pci_ops,                \
        .format_group   = &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
        .name           = "ha",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 4,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        .event_descs    = snbep_uncore_imc_events,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
        .name           = "qpi",
        .num_counters   = 4,
        .num_boxes      = 2,
        .perf_ctr_bits  = 48,
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,
        .event_ctl      = SNBEP_PCI_PMON_CTL0,
        .event_mask     = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,
        .ops            = &snbep_uncore_pci_ops,
        .event_descs    = snbep_uncore_qpi_events,
        .format_group   = &snbep_uncore_qpi_format_group,
};

static struct intel_uncore_type snbep_uncore_r2pcie = {
        .name           = "r2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r2pcie_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
        .name           = "r3qpi",
        .num_counters   = 3,
        .num_boxes      = 2,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r3qpi_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
        &snbep_uncore_ha,
        &snbep_uncore_imc,
        &snbep_uncore_qpi,
        &snbep_uncore_r2pcie,
        &snbep_uncore_r3qpi,
        NULL,
};

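/*
 * driver_data in the id table points at the intel_uncore_type each PCI
 * function implements; the generic uncore PCI probe code uses it to
 * attach the device to the right PMU.
 */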
static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
        { /* Home Agent */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
                .driver_data = (unsigned long)&snbep_uncore_ha,
        },
        { /* MC Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
                .driver_data = (unsigned long)&snbep_uncore_imc,
        },
        { /* MC Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
                .driver_data = (unsigned long)&snbep_uncore_imc,
        },
        { /* MC Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
                .driver_data = (unsigned long)&snbep_uncore_imc,
        },
        { /* MC Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
                .driver_data = (unsigned long)&snbep_uncore_imc,
        },
        { /* QPI Port 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
                .driver_data = (unsigned long)&snbep_uncore_qpi,
        },
        { /* QPI Port 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
                .driver_data = (unsigned long)&snbep_uncore_qpi,
        },
        { /* R2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
                .driver_data = (unsigned long)&snbep_uncore_r2pcie,
        },
        { /* R3QPI Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
                .driver_data = (unsigned long)&snbep_uncore_r3qpi,
        },
        { /* R3QPI Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
                .driver_data = (unsigned long)&snbep_uncore_r3qpi,
        },
        { /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
        .name           = "snbep_uncore",
        .id_table       = snbep_uncore_pci_ids,
};

/*
 * build pci bus to socket mapping
 */
static void snbep_pci2phy_map_init(void)
{
        struct pci_dev *ubox_dev = NULL;
        int i, bus, nodeid;
        u32 config;

        while (1) {
                /* find the UBOX device */
                ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
                                        PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
                                        ubox_dev);
                if (!ubox_dev)
                        break;
                bus = ubox_dev->bus->number;
                /* get the Node ID of the local register */
                pci_read_config_dword(ubox_dev, 0x40, &config);
                nodeid = config;
                /* get the Node ID mapping */
                pci_read_config_dword(ubox_dev, 0x54, &config);
                /*
                 * every three bits in the Node ID mapping register maps
                 * to a particular node.
                 */
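                /*
                 * e.g. if the local Node ID is 1 and bits 5:3 of the
                 * mapping register also hold 1, this bus belongs to
                 * physical package 1.
                 */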
                for (i = 0; i < 8; i++) {
                        if (nodeid == ((config >> (3 * i)) & 0x7)) {
                                pcibus_to_physid[bus] = i;
                                break;
                        }
                }
        }
}
/* end of Sandy Bridge-EP uncore support */

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
        else
                wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0) {
                wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
                        SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
        }
}

static struct uncore_event_desc snb_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
        { /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask5.attr,
        NULL,
};

static struct attribute_group snb_uncore_format_group = {
        .name           = "format",
        .attrs          = snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
        .init_box       = snb_uncore_msr_init_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_cbox_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
        EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 2,
        .num_boxes      = 4,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
        .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
        .fixed_ctr      = SNB_UNC_FIXED_CTR,
        .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed   = 1,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
        .constraints    = snb_uncore_cbox_constraints,
        .ops            = &snb_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
        .event_descs    = snb_uncore_events,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
        &snb_uncore_cbox,
        NULL,
};
/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
        else
                wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask8.attr,
        NULL,
};

static struct attribute_group nhm_uncore_format_group = {
        .name = "format",
        .attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
        INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
        { /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
        .disable_box    = nhm_uncore_msr_disable_box,
        .enable_box     = nhm_uncore_msr_enable_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = nhm_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
        .name           = "",
        .num_counters   = 8,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .event_ctl      = NHM_UNC_PERFEVTSEL0,
        .perf_ctr       = NHM_UNC_UNCORE_PMC0,
        .fixed_ctr      = NHM_UNC_FIXED_CTR,
        .fixed_ctl      = NHM_UNC_FIXED_CTR_CTRL,
        .event_mask     = NHM_UNC_RAW_EVENT_MASK,
        .event_descs    = nhm_uncore_events,
        .ops            = &nhm_uncore_msr_ops,
        .format_group   = &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
        &nhm_uncore,
        NULL,
};
/* end of Nehalem uncore support */

/* Nehalem-EX uncore support */
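/* extract field @i (each @n bits wide) from @x, e.g. byte 1 of a packed value */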
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
                                ((1ULL << (n)) - 1)))

DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");

static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
{
        wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
}

static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
{
        unsigned msr = uncore_msr_box_ctl(box);
        u64 config;

        if (msr) {
                rdmsrl(msr, config);
                config &= ~((1ULL << uncore_num_counters(box)) - 1);
                /* WBox has a fixed counter */
                if (uncore_msr_fixed_ctl(box))
                        config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
                wrmsrl(msr, config);
        }
}

static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        unsigned msr = uncore_msr_box_ctl(box);
        u64 config;

        if (msr) {
                rdmsrl(msr, config);
                config |= (1ULL << uncore_num_counters(box)) - 1;
                /* WBox has a fixed counter */
                if (uncore_msr_fixed_ctl(box))
                        config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
                wrmsrl(msr, config);
        }
}

static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        wrmsrl(event->hw.config_base, 0);
}

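/*
 * The enable bit differs between NHM-EX boxes: when a box's raw event
 * mask already claims bit 0 as a config bit, its counters are enabled
 * via bit 22 instead of bit 0.
 */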
static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
        else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
                wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
        else
                wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}

#define NHMEX_UNCORE_OPS_COMMON_INIT()                          \
        .init_box       = nhmex_uncore_msr_init_box,            \
        .disable_box    = nhmex_uncore_msr_disable_box,         \
        .enable_box     = nhmex_uncore_msr_enable_box,          \
        .disable_event  = nhmex_uncore_msr_disable_event,       \
        .read_counter   = uncore_msr_read_counter

static struct intel_uncore_ops nhmex_uncore_ops = {
        NHMEX_UNCORE_OPS_COMMON_INIT(),
        .enable_event   = nhmex_uncore_msr_enable_event,
};

static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_edge.attr,
        NULL,
};

static struct attribute_group nhmex_uncore_ubox_format_group = {
        .name           = "format",
        .attrs          = nhmex_uncore_ubox_formats_attr,
};

static struct intel_uncore_type nhmex_uncore_ubox = {
        .name           = "ubox",
        .num_counters   = 1,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        .event_ctl      = NHMEX_U_MSR_PMON_EV_SEL,
        .perf_ctr       = NHMEX_U_MSR_PMON_CTR,
        .event_mask     = NHMEX_U_PMON_RAW_EVENT_MASK,
        .box_ctl        = NHMEX_U_MSR_PMON_GLOBAL_CTL,
        .ops            = &nhmex_uncore_ops,
        .format_group   = &nhmex_uncore_ubox_format_group
};

static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static struct attribute_group nhmex_uncore_cbox_format_group = {
        .name = "format",
        .attrs = nhmex_uncore_cbox_formats_attr,
};

/* msr offset for each instance of cbox */
static unsigned nhmex_cbox_msr_offsets[] = {
        0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
};

static struct intel_uncore_type nhmex_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 6,
        .num_boxes              = 10,
        .perf_ctr_bits          = 48,
        .event_ctl              = NHMEX_C0_MSR_PMON_EV_SEL0,
        .perf_ctr               = NHMEX_C0_MSR_PMON_CTR0,
        .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
        .box_ctl                = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
        .msr_offsets            = nhmex_cbox_msr_offsets,
        .pair_ctr_ctl           = 1,
        .ops                    = &nhmex_uncore_ops,
        .format_group           = &nhmex_uncore_cbox_format_group
};

static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
        { /* end: all zeroes */ },
};

static struct intel_uncore_type nhmex_uncore_wbox = {
        .name                   = "wbox",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .event_ctl              = NHMEX_W_MSR_PMON_CNT0,
        .perf_ctr               = NHMEX_W_MSR_PMON_EVT_SEL0,
        .fixed_ctr              = NHMEX_W_MSR_PMON_FIXED_CTR,
        .fixed_ctl              = NHMEX_W_MSR_PMON_FIXED_CTL,
        .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
        .box_ctl                = NHMEX_W_MSR_GLOBAL_CTL,
        .pair_ctr_ctl           = 1,
        .event_descs            = nhmex_uncore_wbox_events,
        .ops                    = &nhmex_uncore_ops,
        .format_group           = &nhmex_uncore_cbox_format_group
};

static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
        int ctr, ev_sel;

        ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
                NHMEX_B_PMON_CTR_SHIFT;
        ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
                  NHMEX_B_PMON_CTL_EV_SEL_SHIFT;

        /* events that do not use the match/mask registers */
        if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
            (ctr == 2 && ev_sel != 0x4) || ctr == 3)
                return 0;

        if (box->pmu->pmu_idx == 0)
                reg1->reg = NHMEX_B0_MSR_MATCH;
        else
                reg1->reg = NHMEX_B1_MSR_MATCH;
        reg1->idx = 0;
        reg1->config = event->attr.config1;
        reg2->config = event->attr.config2;
        return 0;
}

static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

        if (reg1->idx != EXTRA_REG_NONE) {
                wrmsrl(reg1->reg, reg1->config);
                wrmsrl(reg1->reg + 1, reg2->config);
        }
        wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
                (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
}

/*
 * The Bbox has 4 counters, but each counter monitors different events.
 * Use bits 6-7 in the event config to select counter.
 */
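/*
 * e.g. EVENT_CONSTRAINT(0x40, 2, 0xc0): events whose counter-select
 * field (bits 6-7) is 1 may only run on counter 1 (counter mask 0x2).
 */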
static struct event_constraint nhmex_uncore_bbox_constraints[] = {
        EVENT_CONSTRAINT(0, 1, 0xc0),
        EVENT_CONSTRAINT(0x40, 2, 0xc0),
        EVENT_CONSTRAINT(0x80, 4, 0xc0),
        EVENT_CONSTRAINT(0xc0, 8, 0xc0),
        EVENT_CONSTRAINT_END,
};

static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
        &format_attr_event5.attr,
        &format_attr_counter.attr,
        &format_attr_match.attr,
        &format_attr_mask.attr,
        NULL,
};

static struct attribute_group nhmex_uncore_bbox_format_group = {
        .name = "format",
        .attrs = nhmex_uncore_bbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
        NHMEX_UNCORE_OPS_COMMON_INIT(),
        .enable_event           = nhmex_bbox_msr_enable_event,
        .hw_config              = nhmex_bbox_hw_config,
        .get_constraint         = uncore_get_constraint,
        .put_constraint         = uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_bbox = {
        .name                   = "bbox",
        .num_counters           = 4,
        .num_boxes              = 2,
        .perf_ctr_bits          = 48,
        .event_ctl              = NHMEX_B0_MSR_PMON_CTL0,
        .perf_ctr               = NHMEX_B0_MSR_PMON_CTR0,
        .event_mask             = NHMEX_B_PMON_RAW_EVENT_MASK,
        .box_ctl                = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
        .msr_offset             = NHMEX_B_MSR_OFFSET,
        .pair_ctr_ctl           = 1,
        .num_shared_regs        = 1,
        .constraints            = nhmex_uncore_bbox_constraints,
        .ops                    = &nhmex_uncore_bbox_ops,
        .format_group           = &nhmex_uncore_bbox_format_group
};

static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

        /* only TO_R_PROG_EV event uses the match/mask register */
        if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
            NHMEX_S_EVENT_TO_R_PROG_EV)
                return 0;

        if (box->pmu->pmu_idx == 0)
                reg1->reg = NHMEX_S0_MSR_MM_CFG;
        else
                reg1->reg = NHMEX_S1_MSR_MM_CFG;
        reg1->idx = 0;
        reg1->config = event->attr.config1;
        reg2->config = event->attr.config2;
        return 0;
}

static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

        if (reg1->idx != EXTRA_REG_NONE) {
                wrmsrl(reg1->reg, 0);
                wrmsrl(reg1->reg + 1, reg1->config);
                wrmsrl(reg1->reg + 2, reg2->config);
                wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
        }
        wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
}

static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_match.attr,
        &format_attr_mask.attr,
        NULL,
};

static struct attribute_group nhmex_uncore_sbox_format_group = {
        .name                   = "format",
        .attrs                  = nhmex_uncore_sbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
        NHMEX_UNCORE_OPS_COMMON_INIT(),
        .enable_event           = nhmex_sbox_msr_enable_event,
        .hw_config              = nhmex_sbox_hw_config,
        .get_constraint         = uncore_get_constraint,
        .put_constraint         = uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_sbox = {
        .name                   = "sbox",
        .num_counters           = 4,
        .num_boxes              = 2,
        .perf_ctr_bits          = 48,
        .event_ctl              = NHMEX_S0_MSR_PMON_CTL0,
        .perf_ctr               = NHMEX_S0_MSR_PMON_CTR0,
        .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
        .box_ctl                = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
        .msr_offset             = NHMEX_S_MSR_OFFSET,
        .pair_ctr_ctl           = 1,
        .num_shared_regs        = 1,
        .ops                    = &nhmex_uncore_sbox_ops,
        .format_group           = &nhmex_uncore_sbox_format_group
};

enum {
        EXTRA_REG_NHMEX_M_FILTER,
        EXTRA_REG_NHMEX_M_DSP,
        EXTRA_REG_NHMEX_M_ISS,
        EXTRA_REG_NHMEX_M_MAP,
        EXTRA_REG_NHMEX_M_MSC_THR,
        EXTRA_REG_NHMEX_M_PGT,
        EXTRA_REG_NHMEX_M_PLD,
        EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
};

static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
        MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
        MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
        MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
        MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
        /* event 0xa uses two extra registers */
        MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
        MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
        MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
        /* events 0xd ~ 0x10 use the same extra register */
        MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
        MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
        MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
        MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
        MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
        MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
        MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
        MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
        MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
        EVENT_EXTRA_END
};

/* Nehalem-EX or Westmere-EX ? */
static bool uncore_nhmex;

static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
{
        struct intel_uncore_extra_reg *er;
        unsigned long flags;
        bool ret = false;
        u64 mask;

        if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
                er = &box->shared_regs[idx];
                raw_spin_lock_irqsave(&er->lock, flags);
                if (!atomic_read(&er->ref) || er->config == config) {
                        atomic_inc(&er->ref);
                        er->config = config;
                        ret = true;
                }
                raw_spin_unlock_irqrestore(&er->lock, flags);

                return ret;
        }
        /*
         * The ZDP_CTL_FVC MSR has 4 fields which are used to control
         * events 0xd ~ 0x10. Besides these 4 fields, there are additional
         * fields which are shared.
         */
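        /*
         * er->ref packs four 8-bit reference counts, one per ZDP_CTL_FVC
         * sub-field, so a single atomic tracks all four fields.
         */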
        idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
        if (WARN_ON_ONCE(idx >= 4))
                return false;

        /* mask of the shared fields */
        if (uncore_nhmex)
                mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
        else
                mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
        er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];

        raw_spin_lock_irqsave(&er->lock, flags);
        /* add mask of the non-shared field if it's in use */
        if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
                if (uncore_nhmex)
                        mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
                else
                        mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
        }

        if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
                atomic_add(1 << (idx * 8), &er->ref);
                if (uncore_nhmex)
                        mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
                                NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
                else
                        mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
                                WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
                er->config &= ~mask;
                er->config |= (config & mask);
                ret = true;
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);

        return ret;
}

static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
{
        struct intel_uncore_extra_reg *er;

        if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
                er = &box->shared_regs[idx];
                atomic_dec(&er->ref);
                return;
        }

        idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
        er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
        atomic_sub(1 << (idx * 8), &er->ref);
}

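/*
 * Move an event's non-shared ZDP_CTL_FVC control bits from their current
 * sub-field to @new_idx; each sub-field is 3 bits wide, hence the shifts
 * by 3 * idx.  When @modify is set, the event selector and extra-reg
 * state are updated to match.
 */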
static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        int idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
        u64 config = reg1->config;

        /* get the non-shared control bits and shift them */
        idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
        if (uncore_nhmex)
                config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
        else
                config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
        if (new_idx > orig_idx) {
                idx = new_idx - orig_idx;
                config <<= 3 * idx;
        } else {
                idx = orig_idx - new_idx;
                config >>= 3 * idx;
        }

        /* add the shared control bits back */
        if (uncore_nhmex)
                config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
        else
                config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
        if (modify) {
                /* adjust the main event selector */
                if (new_idx > orig_idx)
                        hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
                else
                        hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
                reg1->config = config;
                reg1->idx = ~0xff | new_idx;
        }
        return config;
}

static struct event_constraint *
nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
        int i, idx[2], alloc = 0;
        u64 config1 = reg1->config;

        idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
        idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
again:
        for (i = 0; i < 2; i++) {
                if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
                        idx[i] = 0xff;

                if (idx[i] == 0xff)
                        continue;

                if (!nhmex_mbox_get_shared_reg(box, idx[i],
                                __BITS_VALUE(config1, i, 32)))
                        goto fail;
                alloc |= (0x1 << i);
        }

        /* for the match/mask registers */
        if (reg2->idx != EXTRA_REG_NONE &&
            (uncore_box_is_fake(box) || !reg2->alloc) &&
            !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
                goto fail;

        /*
         * If it's a fake box -- as per validate_{group,event}() we
         * shouldn't touch event state and we can avoid doing so
         * since both will only call get_event_constraints() once
         * on each event, this avoids the need for reg->alloc.
         */
        if (!uncore_box_is_fake(box)) {
                if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
                        nhmex_mbox_alter_er(event, idx[0], true);
                reg1->alloc |= alloc;
                if (reg2->idx != EXTRA_REG_NONE)
                        reg2->alloc = 1;
        }
        return NULL;
fail:
        if (idx[0] != 0xff && !(alloc & 0x1) &&
            idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
                /*
                 * events 0xd ~ 0x10 are functionally identical, but are
                 * controlled by different fields in the ZDP_CTL_FVC
                 * register. If we failed to take one field, try the
                 * other 3 choices.
                 */
                BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
                idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
                idx[0] = (idx[0] + 1) % 4;
                idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
                if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
                        config1 = nhmex_mbox_alter_er(event, idx[0], false);
                        goto again;
                }
        }

        if (alloc & 0x1)
                nhmex_mbox_put_shared_reg(box, idx[0]);
        if (alloc & 0x2)
                nhmex_mbox_put_shared_reg(box, idx[1]);
        return &constraint_empty;
}

static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;

        if (uncore_box_is_fake(box))
                return;

        if (reg1->alloc & 0x1)
                nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
        if (reg1->alloc & 0x2)
                nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
        reg1->alloc = 0;

        if (reg2->alloc) {
                nhmex_mbox_put_shared_reg(box, reg2->idx);
                reg2->alloc = 0;
        }
}

static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
{
        if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
                return er->idx;
        return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
}

static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_type *type = box->pmu->type;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
        struct extra_reg *er;
        unsigned msr;
        int reg_idx = 0;
        /*
         * The mbox events may require 2 extra MSRs at the most. But only
         * the lower 32 bits in these MSRs are significant, so we can use
         * config1 to pass two MSRs' config.
         */
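        /*
         * reg1 therefore packs two MSR numbers (16 bits each) in ->reg and
         * two extra-reg indices (8 bits each) in ->idx; they are unpacked
         * again with __BITS_VALUE() at enable time.
         */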
        for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
                if (er->event != (event->hw.config & er->config_mask))
                        continue;
                if (event->attr.config1 & ~er->valid_mask)
                        return -EINVAL;

                msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
                if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
                        return -EINVAL;

                /* always use the 32~63 bits to pass the PLD config */
                if (er->idx == EXTRA_REG_NHMEX_M_PLD)
                        reg_idx = 1;
                else if (WARN_ON_ONCE(reg_idx > 0))
                        return -EINVAL;

                reg1->idx &= ~(0xff << (reg_idx * 8));
                reg1->reg &= ~(0xffff << (reg_idx * 16));
                reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
                reg1->reg |= msr << (reg_idx * 16);
                reg1->config = event->attr.config1;
                reg_idx++;
        }
1403         /*
1404          * The mbox only provides the ability to perform address
1405          * matching for the PLD events.
1406          */
1407         if (reg_idx == 2) {
1408                 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
1409                 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
1410                         reg2->config = event->attr.config2;
1411                 else
1412                         reg2->config = ~0ULL;
1413                 if (box->pmu->pmu_idx == 0)
1414                         reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
1415                 else
1416                         reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
1417         }
1418         return 0;
1419 }
1420
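     /*
      * Editor's note: for indices below EXTRA_REG_NHMEX_M_ZDP_CTL_FVC the
      * shared register holds a single config written by one owner, so a
      * plain 64-bit read suffices; the ZDP_CTL_FVC register is updated
      * field-wise by up to four events, hence its config is read under
      * the register's lock.
      */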
1421 static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
1422 {
1423         struct intel_uncore_extra_reg *er;
1424         unsigned long flags;
1425         u64 config;
1426
1427         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
1428                 return box->shared_regs[idx].config;
1429
1430         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1431         raw_spin_lock_irqsave(&er->lock, flags);
1432         config = er->config;
1433         raw_spin_unlock_irqrestore(&er->lock, flags);
1434         return config;
1435 }
1436
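     /*
      * Editor's note on the write order below: the address filter is first
      * disabled (MM_CFG written as 0) while the match and mask registers
      * at reg2->reg + 1 and reg2->reg + 2 are reprogrammed, and only then
      * is NHMEX_M_PMON_MM_CFG_EN set, so a half-programmed filter is
      * never active.
      */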
1437 static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1438 {
1439         struct hw_perf_event *hwc = &event->hw;
1440         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1441         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1442         int idx;
1443
1444         idx = __BITS_VALUE(reg1->idx, 0, 8);
1445         if (idx != 0xff)
1446                 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
1447                         nhmex_mbox_shared_reg_config(box, idx));
1448         idx = __BITS_VALUE(reg1->idx, 1, 8);
1449         if (idx != 0xff)
1450                 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
1451                         nhmex_mbox_shared_reg_config(box, idx));
1452
1453         if (reg2->idx != EXTRA_REG_NONE) {
1454                 wrmsrl(reg2->reg, 0);
1455                 if (reg2->config != ~0ULL) {
1456                         wrmsrl(reg2->reg + 1,
1457                                 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
1458                         wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
1459                                 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
1460                         wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
1461                 }
1462         }
1463
1464         wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
1465 }
1466
1467 DEFINE_UNCORE_FORMAT_ATTR(count_mode,           count_mode,     "config:2-3");
1468 DEFINE_UNCORE_FORMAT_ATTR(storage_mode,         storage_mode,   "config:4-5");
1469 DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,            wrap_mode,      "config:6");
1470 DEFINE_UNCORE_FORMAT_ATTR(flag_mode,            flag_mode,      "config:7");
1471 DEFINE_UNCORE_FORMAT_ATTR(inc_sel,              inc_sel,        "config:9-13");
1472 DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,         set_flag_sel,   "config:19-21");
1473 DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en,        filter_cfg_en,  "config2:63");
1474 DEFINE_UNCORE_FORMAT_ATTR(filter_match,         filter_match,   "config2:0-33");
1475 DEFINE_UNCORE_FORMAT_ATTR(filter_mask,          filter_mask,    "config2:34-61");
1476 DEFINE_UNCORE_FORMAT_ATTR(dsp,                  dsp,            "config1:0-31");
1477 DEFINE_UNCORE_FORMAT_ATTR(thr,                  thr,            "config1:0-31");
1478 DEFINE_UNCORE_FORMAT_ATTR(fvc,                  fvc,            "config1:0-31");
1479 DEFINE_UNCORE_FORMAT_ATTR(pgt,                  pgt,            "config1:0-31");
1480 DEFINE_UNCORE_FORMAT_ATTR(map,                  map,            "config1:0-31");
1481 DEFINE_UNCORE_FORMAT_ATTR(iss,                  iss,            "config1:0-31");
1482 DEFINE_UNCORE_FORMAT_ATTR(pld,                  pld,            "config1:32-63");
1483
1484 static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
1485         &format_attr_count_mode.attr,
1486         &format_attr_storage_mode.attr,
1487         &format_attr_wrap_mode.attr,
1488         &format_attr_flag_mode.attr,
1489         &format_attr_inc_sel.attr,
1490         &format_attr_set_flag_sel.attr,
1491         &format_attr_filter_cfg_en.attr,
1492         &format_attr_filter_match.attr,
1493         &format_attr_filter_mask.attr,
1494         &format_attr_dsp.attr,
1495         &format_attr_thr.attr,
1496         &format_attr_fvc.attr,
1497         &format_attr_pgt.attr,
1498         &format_attr_map.attr,
1499         &format_attr_iss.attr,
1500         &format_attr_pld.attr,
1501         NULL,
1502 };
1503
1504 static struct attribute_group nhmex_uncore_mbox_format_group = {
1505         .name           = "format",
1506         .attrs          = nhmex_uncore_mbox_formats_attr,
1507 };
1508
1509 static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
1510         INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
1511         INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
1512         { /* end: all zeroes */ },
1513 };
1514
1515 static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
1516         INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
1517         INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
1518         { /* end: all zeroes */ },
1519 };
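
     /*
      * Usage sketch (editor's note): with num_boxes == 2 these PMUs are
      * registered as "uncore_mbox_0" and "uncore_mbox_1" (see
      * uncore_pmu_register() below), so the aliases above can be used as:
      *
      *   perf stat -a -e uncore_mbox_0/bbox_cmds_read/ sleep 1
      *
      * which the perf tool expands via the format attributes, e.g. to
      * inc_sel=0xd,fvc=0x2800 on Nehalem-EX.
      */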
1520
1521 static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
1522         NHMEX_UNCORE_OPS_COMMON_INIT(),
1523         .enable_event   = nhmex_mbox_msr_enable_event,
1524         .hw_config      = nhmex_mbox_hw_config,
1525         .get_constraint = nhmex_mbox_get_constraint,
1526         .put_constraint = nhmex_mbox_put_constraint,
1527 };
1528
1529 static struct intel_uncore_type nhmex_uncore_mbox = {
1530         .name                   = "mbox",
1531         .num_counters           = 6,
1532         .num_boxes              = 2,
1533         .perf_ctr_bits          = 48,
1534         .event_ctl              = NHMEX_M0_MSR_PMU_CTL0,
1535         .perf_ctr               = NHMEX_M0_MSR_PMU_CNT0,
1536         .event_mask             = NHMEX_M_PMON_RAW_EVENT_MASK,
1537         .box_ctl                = NHMEX_M0_MSR_GLOBAL_CTL,
1538         .msr_offset             = NHMEX_M_MSR_OFFSET,
1539         .pair_ctr_ctl           = 1,
1540         .num_shared_regs        = 8,
1541         .event_descs            = nhmex_uncore_mbox_events,
1542         .ops                    = &nhmex_uncore_mbox_ops,
1543         .format_group           = &nhmex_uncore_mbox_format_group,
1544 };
1545
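     /*
      * Editor's note: rbox events come in pairs that differ only in the
      * low bit of the event select and, for idx 2/3, in which byte of the
      * QLX config they occupy; converting an event to its sibling thus
      * toggles reg1->idx and the event select, and shifts the config byte.
      */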
1546 static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
1547 {
1548         struct hw_perf_event *hwc = &event->hw;
1549         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1551
1552         /* adjust the main event selector and extra register index */
1553         if (reg1->idx % 2) {
1554                 reg1->idx--;
1555                 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1556         } else {
1557                 reg1->idx++;
1558                 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1559         }
1560
1561         /* adjust extra register config */
1563         switch (reg1->idx % 6) {
1564         case 2:
1565                 /* shift the 8~15 bits to the 0~7 bits */
1566                 reg1->config >>= 8;
1567                 break;
1568         case 3:
1569                 /* shift the 0~7 bits to the 8~15 bits */
1570                 reg1->config <<= 8;
1571                 break;
1572         }
1573 }
1574
1575 /*
1576  * Each rbox has 4 event sets, which monitor PQI ports 0~3 or 4~7.
1577  * An event set consists of 6 events; the 3rd and 4th events in
1578  * an event set use the same extra register, so an event set uses
1579  * 5 extra registers.
1580  */
1581 static struct event_constraint *
1582 nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1583 {
1584         struct hw_perf_event *hwc = &event->hw;
1585         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1586         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1587         struct intel_uncore_extra_reg *er;
1588         unsigned long flags;
1589         int idx, er_idx;
1590         u64 config1;
1591         bool ok = false;
1592
1593         if (!uncore_box_is_fake(box) && reg1->alloc)
1594                 return NULL;
1595
1596         idx = reg1->idx % 6;
1597         config1 = reg1->config;
1598 again:
1599         er_idx = idx;
1600         /* the 3rd and 4th events use the same extra register */
1601         if (er_idx > 2)
1602                 er_idx--;
1603         er_idx += (reg1->idx / 6) * 5;
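             /*
              * e.g. within one event set, idx 0..5 map to shared regs
              * 0, 1, 2, 2, 3, 4 -- five registers for six events.
              */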
1604
1605         er = &box->shared_regs[er_idx];
1606         raw_spin_lock_irqsave(&er->lock, flags);
1607         if (idx < 2) {
1608                 if (!atomic_read(&er->ref) || er->config == reg1->config) {
1609                         atomic_inc(&er->ref);
1610                         er->config = reg1->config;
1611                         ok = true;
1612                 }
1613         } else if (idx == 2 || idx == 3) {
1614                 /*
1615                  * these two events use different fields in an extra register,
1616                  * bits 0~7 and bits 8~15 respectively.
1617                  */
1618                 u64 mask = 0xff << ((idx - 2) * 8);
1619                 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
1620                                 !((er->config ^ config1) & mask)) {
1621                         atomic_add(1 << ((idx - 2) * 8), &er->ref);
1622                         er->config &= ~mask;
1623                         er->config |= config1 & mask;
1624                         ok = true;
1625                 }
1626         } else {
1627                 if (!atomic_read(&er->ref) ||
1628                                 (er->config == (hwc->config >> 32) &&
1629                                  er->config1 == reg1->config &&
1630                                  er->config2 == reg2->config)) {
1631                         atomic_inc(&er->ref);
1632                         er->config = (hwc->config >> 32);
1633                         er->config1 = reg1->config;
1634                         er->config2 = reg2->config;
1635                         ok = true;
1636                 }
1637         }
1638         raw_spin_unlock_irqrestore(&er->lock, flags);
1639
1640         if (!ok) {
1641                 /*
1642                  * The Rbox events are always in pairs. The paired
1643                  * events are functionally identical, but use different
1644                  * extra registers. If we failed to take an extra
1645                  * register, try the alternative.
1646                  */
1647                 if (idx % 2)
1648                         idx--;
1649                 else
1650                         idx++;
1651                 if (idx != reg1->idx % 6) {
1652                         if (idx == 2)
1653                                 config1 >>= 8;
1654                         else if (idx == 3)
1655                                 config1 <<= 8;
1656                         goto again;
1657                 }
1658         } else {
1659                 if (!uncore_box_is_fake(box)) {
1660                         if (idx != reg1->idx % 6)
1661                                 nhmex_rbox_alter_er(box, event);
1662                         reg1->alloc = 1;
1663                 }
1664                 return NULL;
1665         }
1666         return &constraint_empty;
1667 }
1668
1669 static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1670 {
1671         struct intel_uncore_extra_reg *er;
1672         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1673         int idx, er_idx;
1674
1675         if (uncore_box_is_fake(box) || !reg1->alloc)
1676                 return;
1677
1678         idx = reg1->idx % 6;
1679         er_idx = idx;
1680         if (er_idx > 2)
1681                 er_idx--;
1682         er_idx += (reg1->idx / 6) * 5;
1683
1684         er = &box->shared_regs[er_idx];
1685         if (idx == 2 || idx == 3)
1686                 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
1687         else
1688                 atomic_dec(&er->ref);
1689
1690         reg1->alloc = 0;
1691 }
1692
1693 static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1694 {
1695         struct hw_perf_event *hwc = &event->hw;
1696         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1697         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1698         int idx;
1699
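             /* 4 event sets x 6 events = 24 (0x18) valid event selects */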
1700         idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
1701                 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1702         if (idx >= 0x18)
1703                 return -EINVAL;
1704
1705         reg1->idx = idx;
1706         reg1->config = event->attr.config1;
1707
1708         switch (idx % 6) {
1709         case 4:
1710         case 5:
1711                 hwc->config |= event->attr.config & (~0ULL << 32);
1712                 reg2->config = event->attr.config2;
1713                 break;
1714         }
1715         return 0;
1716 }
1717
1718 static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx)
1719 {
1720         struct intel_uncore_extra_reg *er;
1721         unsigned long flags;
1722         u64 config;
1723
1724         er = &box->shared_regs[idx];
1725
1726         raw_spin_lock_irqsave(&er->lock, flags);
1727         config = er->config;
1728         raw_spin_unlock_irqrestore(&er->lock, flags);
1729
1730         return config;
1731 }
1732
1733 static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1734 {
1735         struct hw_perf_event *hwc = &event->hw;
1736         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1737         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1738         int idx, port;
1739
1740         idx = reg1->idx;
1741         port = idx / 6 + box->pmu->pmu_idx * 4;
1742
1743         switch (idx % 6) {
1744         case 0:
1745                 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
1746                 break;
1747         case 1:
1748                 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
1749                 break;
1750         case 2:
1751         case 3:
1752                 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
1753                         nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5));
1754                 break;
1755         case 4:
1756                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
1757                         hwc->config >> 32);
1758                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
1759                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
1760                 break;
1761         case 5:
1762                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
1763                         hwc->config >> 32);
1764                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
1765                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
1766                 break;
1767         }
1768
1769         wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
1770                 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
1771 }
1772
1773 DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
1774 DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
1775 DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
1776 DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
1777 DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
1778
1779 static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
1780         &format_attr_event5.attr,
1781         &format_attr_xbr_mm_cfg.attr,
1782         &format_attr_xbr_match.attr,
1783         &format_attr_xbr_mask.attr,
1784         &format_attr_qlx_cfg.attr,
1785         &format_attr_iperf_cfg.attr,
1786         NULL,
1787 };
1788
1789 static struct attribute_group nhmex_uncore_rbox_format_group = {
1790         .name = "format",
1791         .attrs = nhmex_uncore_rbox_formats_attr,
1792 };
1793
1794 static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
1795         INTEL_UNCORE_EVENT_DESC(qpi0_flit_send,         "event=0x0,iperf_cfg=0x80000000"),
1796         INTEL_UNCORE_EVENT_DESC(qpi1_flit_send,         "event=0x6,iperf_cfg=0x80000000"),
1797         INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt,         "event=0x0,iperf_cfg=0x40000000"),
1798         INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt,         "event=0x6,iperf_cfg=0x40000000"),
1799         INTEL_UNCORE_EVENT_DESC(qpi0_data_response,     "event=0x0,iperf_cfg=0xc4"),
1800         INTEL_UNCORE_EVENT_DESC(qpi1_data_response,     "event=0x6,iperf_cfg=0xc4"),
1801         { /* end: all zeroes */ },
1802 };
1803
1804 static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
1805         NHMEX_UNCORE_OPS_COMMON_INIT(),
1806         .enable_event           = nhmex_rbox_msr_enable_event,
1807         .hw_config              = nhmex_rbox_hw_config,
1808         .get_constraint         = nhmex_rbox_get_constraint,
1809         .put_constraint         = nhmex_rbox_put_constraint,
1810 };
1811
1812 static struct intel_uncore_type nhmex_uncore_rbox = {
1813         .name                   = "rbox",
1814         .num_counters           = 8,
1815         .num_boxes              = 2,
1816         .perf_ctr_bits          = 48,
1817         .event_ctl              = NHMEX_R_MSR_PMON_CTL0,
1818         .perf_ctr               = NHMEX_R_MSR_PMON_CNT0,
1819         .event_mask             = NHMEX_R_PMON_RAW_EVENT_MASK,
1820         .box_ctl                = NHMEX_R_MSR_GLOBAL_CTL,
1821         .msr_offset             = NHMEX_R_MSR_OFFSET,
1822         .pair_ctr_ctl           = 1,
1823         .num_shared_regs        = 20,
1824         .event_descs            = nhmex_uncore_rbox_events,
1825         .ops                    = &nhmex_uncore_rbox_ops,
1826         .format_group           = &nhmex_uncore_rbox_format_group,
1827 };
1828
1829 static struct intel_uncore_type *nhmex_msr_uncores[] = {
1830         &nhmex_uncore_ubox,
1831         &nhmex_uncore_cbox,
1832         &nhmex_uncore_bbox,
1833         &nhmex_uncore_sbox,
1834         &nhmex_uncore_mbox,
1835         &nhmex_uncore_rbox,
1836         &nhmex_uncore_wbox,
1837         NULL,
1838 };
1839 /* end of Nehalem-EX uncore support */
1840
1841 static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
1842 {
1843         struct hw_perf_event *hwc = &event->hw;
1844
1845         hwc->idx = idx;
1846         hwc->last_tag = ++box->tags[idx];
1847
1848         if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
1849                 hwc->event_base = uncore_fixed_ctr(box);
1850                 hwc->config_base = uncore_fixed_ctl(box);
1851                 return;
1852         }
1853
1854         hwc->config_base = uncore_event_ctl(box, hwc->idx);
1855         hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
1856 }
1857
1858 static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
1859 {
1860         u64 prev_count, new_count, delta;
1861         int shift;
1862
1863         if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
1864                 shift = 64 - uncore_fixed_ctr_bits(box);
1865         else
1866                 shift = 64 - uncore_perf_ctr_bits(box);
1867
1868         /* the hrtimer might modify the previous event value */
1869 again:
1870         prev_count = local64_read(&event->hw.prev_count);
1871         new_count = uncore_read_counter(box, event);
1872         if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
1873                 goto again;
1874
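             /*
              * e.g. with 48-bit counters shift is 16: the shifts below make
              * the subtraction wrap modulo 2^48, so a counter rollover
              * between two reads still yields the correct unsigned delta.
              */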
1875         delta = (new_count << shift) - (prev_count << shift);
1876         delta >>= shift;
1877
1878         local64_add(delta, &event->count);
1879 }
1880
1881 /*
1882  * The overflow interrupt is unavailable for SandyBridge-EP and broken
1883  * for SandyBridge, so we use an hrtimer to poll the counters
1884  * periodically before they can wrap.
1885  */
1886 static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
1887 {
1888         struct intel_uncore_box *box;
1889         unsigned long flags;
1890         int bit;
1891
1892         box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
1893         if (!box->n_active || box->cpu != smp_processor_id())
1894                 return HRTIMER_NORESTART;
1895         /*
1896          * disable local interrupts to prevent uncore_pmu_event_start/stop
1897          * from interrupting the update process
1898          */
1899         local_irq_save(flags);
1900
1901         for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
1902                 uncore_perf_event_update(box, box->events[bit]);
1903
1904         local_irq_restore(flags);
1905
1906         hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
1907         return HRTIMER_RESTART;
1908 }
1909
1910 static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
1911 {
1912         __hrtimer_start_range_ns(&box->hrtimer,
1913                         ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
1914                         HRTIMER_MODE_REL_PINNED, 0);
1915 }
1916
1917 static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
1918 {
1919         hrtimer_cancel(&box->hrtimer);
1920 }
1921
1922 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
1923 {
1924         hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1925         box->hrtimer.function = uncore_pmu_hrtimer;
1926 }
1927
1928 struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
1929 {
1930         struct intel_uncore_box *box;
1931         int i, size;
1932
1933         size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
1934
1935         box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
1936         if (!box)
1937                 return NULL;
1938
1939         for (i = 0; i < type->num_shared_regs; i++)
1940                 raw_spin_lock_init(&box->shared_regs[i].lock);
1941
1942         uncore_pmu_init_hrtimer(box);
1943         atomic_set(&box->refcnt, 1);
1944         box->cpu = -1;
1945         box->phys_id = -1;
1946
1947         return box;
1948 }
1949
1950 static struct intel_uncore_box *
1951 uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
1952 {
1953         struct intel_uncore_box *box;
1954
1955         box = *per_cpu_ptr(pmu->box, cpu);
1956         if (box)
1957                 return box;
1958
1959         raw_spin_lock(&uncore_box_lock);
1960         list_for_each_entry(box, &pmu->box_list, list) {
1961                 if (box->phys_id == topology_physical_package_id(cpu)) {
1962                         atomic_inc(&box->refcnt);
1963                         *per_cpu_ptr(pmu->box, cpu) = box;
1964                         break;
1965                 }
1966         }
1967         raw_spin_unlock(&uncore_box_lock);
1968
1969         return *per_cpu_ptr(pmu->box, cpu);
1970 }
1971
1972 static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
1973 {
1974         return container_of(event->pmu, struct intel_uncore_pmu, pmu);
1975 }
1976
1977 static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
1978 {
1979         /*
1980          * perf core schedules events on a per-cpu basis; uncore events are
1981          * collected by one of the cpus inside a physical package.
1982          */
1983         return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
1984 }
1985
1986 static int
1987 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
1988 {
1989         struct perf_event *event;
1990         int n, max_count;
1991
1992         max_count = box->pmu->type->num_counters;
1993         if (box->pmu->type->fixed_ctl)
1994                 max_count++;
1995
1996         if (box->n_events >= max_count)
1997                 return -EINVAL;
1998
1999         n = box->n_events;
2000         box->event_list[n] = leader;
2001         n++;
2002         if (!dogrp)
2003                 return n;
2004
2005         list_for_each_entry(event, &leader->sibling_list, group_entry) {
2006                 if (event->state <= PERF_EVENT_STATE_OFF)
2007                         continue;
2008
2009                 if (n >= max_count)
2010                         return -EINVAL;
2011
2012                 box->event_list[n] = event;
2013                 n++;
2014         }
2015         return n;
2016 }
2017
2018 static struct event_constraint *
2019 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
2020 {
2021         struct intel_uncore_type *type = box->pmu->type;
2022         struct event_constraint *c;
2023
2024         if (type->ops->get_constraint) {
2025                 c = type->ops->get_constraint(box, event);
2026                 if (c)
2027                         return c;
2028         }
2029
2030         if (event->hw.config == ~0ULL)
2031                 return &constraint_fixed;
2032
2033         if (type->constraints) {
2034                 for_each_event_constraint(c, type->constraints) {
2035                         if ((event->hw.config & c->cmask) == c->code)
2036                                 return c;
2037                 }
2038         }
2039
2040         return &type->unconstrainted;
2041 }
2042
2043 static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
2044 {
2045         if (box->pmu->type->ops->put_constraint)
2046                 box->pmu->type->ops->put_constraint(box, event);
2047 }
2048
2049 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
2050 {
2051         unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
2052         struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX];
2053         int i, wmin, wmax, ret = 0;
2054         struct hw_perf_event *hwc;
2055
2056         bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
2057
2058         for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
2059                 c = uncore_get_event_constraint(box, box->event_list[i]);
2060                 constraints[i] = c;
2061                 wmin = min(wmin, c->weight);
2062                 wmax = max(wmax, c->weight);
2063         }
2064
2065         /* fastpath, try to reuse previous register */
2066         for (i = 0; i < n; i++) {
2067                 hwc = &box->event_list[i]->hw;
2068                 c = constraints[i];
2069
2070                 /* never assigned */
2071                 if (hwc->idx == -1)
2072                         break;
2073
2074                 /* constraint still honored */
2075                 if (!test_bit(hwc->idx, c->idxmsk))
2076                         break;
2077
2078                 /* not already used */
2079                 if (test_bit(hwc->idx, used_mask))
2080                         break;
2081
2082                 __set_bit(hwc->idx, used_mask);
2083                 if (assign)
2084                         assign[i] = hwc->idx;
2085         }
2086         /* slow path */
2087         if (i != n)
2088                 ret = perf_assign_events(constraints, n, wmin, wmax, assign);
2089
2090         if (!assign || ret) {
2091                 for (i = 0; i < n; i++)
2092                         uncore_put_event_constraint(box, box->event_list[i]);
2093         }
2094         return ret ? -EINVAL : 0;
2095 }
2096
2097 static void uncore_pmu_event_start(struct perf_event *event, int flags)
2098 {
2099         struct intel_uncore_box *box = uncore_event_to_box(event);
2100         int idx = event->hw.idx;
2101
2102         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
2103                 return;
2104
2105         if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
2106                 return;
2107
2108         event->hw.state = 0;
2109         box->events[idx] = event;
2110         box->n_active++;
2111         __set_bit(idx, box->active_mask);
2112
2113         local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
2114         uncore_enable_event(box, event);
2115
2116         if (box->n_active == 1) {
2117                 uncore_enable_box(box);
2118                 uncore_pmu_start_hrtimer(box);
2119         }
2120 }
2121
2122 static void uncore_pmu_event_stop(struct perf_event *event, int flags)
2123 {
2124         struct intel_uncore_box *box = uncore_event_to_box(event);
2125         struct hw_perf_event *hwc = &event->hw;
2126
2127         if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
2128                 uncore_disable_event(box, event);
2129                 box->n_active--;
2130                 box->events[hwc->idx] = NULL;
2131                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
2132                 hwc->state |= PERF_HES_STOPPED;
2133
2134                 if (box->n_active == 0) {
2135                         uncore_disable_box(box);
2136                         uncore_pmu_cancel_hrtimer(box);
2137                 }
2138         }
2139
2140         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
2141                 /*
2142                  * Drain the remaining delta count out of an event
2143                  * that we are disabling:
2144                  */
2145                 uncore_perf_event_update(box, event);
2146                 hwc->state |= PERF_HES_UPTODATE;
2147         }
2148 }
2149
2150 static int uncore_pmu_event_add(struct perf_event *event, int flags)
2151 {
2152         struct intel_uncore_box *box = uncore_event_to_box(event);
2153         struct hw_perf_event *hwc = &event->hw;
2154         int assign[UNCORE_PMC_IDX_MAX];
2155         int i, n, ret;
2156
2157         if (!box)
2158                 return -ENODEV;
2159
2160         ret = n = uncore_collect_events(box, event, false);
2161         if (ret < 0)
2162                 return ret;
2163
2164         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
2165         if (!(flags & PERF_EF_START))
2166                 hwc->state |= PERF_HES_ARCH;
2167
2168         ret = uncore_assign_events(box, assign, n);
2169         if (ret)
2170                 return ret;
2171
2172         /* save events moving to new counters */
2173         for (i = 0; i < box->n_events; i++) {
2174                 event = box->event_list[i];
2175                 hwc = &event->hw;
2176
2177                 if (hwc->idx == assign[i] &&
2178                         hwc->last_tag == box->tags[assign[i]])
2179                         continue;
2180                 /*
2181                  * Ensure we don't accidentally enable a stopped
2182                  * counter simply because we rescheduled.
2183                  */
2184                 if (hwc->state & PERF_HES_STOPPED)
2185                         hwc->state |= PERF_HES_ARCH;
2186
2187                 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2188         }
2189
2190         /* reprogram moved events into new counters */
2191         for (i = 0; i < n; i++) {
2192                 event = box->event_list[i];
2193                 hwc = &event->hw;
2194
2195                 if (hwc->idx != assign[i] ||
2196                         hwc->last_tag != box->tags[assign[i]])
2197                         uncore_assign_hw_event(box, event, assign[i]);
2198                 else if (i < box->n_events)
2199                         continue;
2200
2201                 if (hwc->state & PERF_HES_ARCH)
2202                         continue;
2203
2204                 uncore_pmu_event_start(event, 0);
2205         }
2206         box->n_events = n;
2207
2208         return 0;
2209 }
2210
2211 static void uncore_pmu_event_del(struct perf_event *event, int flags)
2212 {
2213         struct intel_uncore_box *box = uncore_event_to_box(event);
2214         int i;
2215
2216         uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2217
2218         for (i = 0; i < box->n_events; i++) {
2219                 if (event == box->event_list[i]) {
2220                         uncore_put_event_constraint(box, event);
2221
2222                         while (++i < box->n_events)
2223                                 box->event_list[i - 1] = box->event_list[i];
2224
2225                         --box->n_events;
2226                         break;
2227                 }
2228         }
2229
2230         event->hw.idx = -1;
2231         event->hw.last_tag = ~0ULL;
2232 }
2233
2234 static void uncore_pmu_event_read(struct perf_event *event)
2235 {
2236         struct intel_uncore_box *box = uncore_event_to_box(event);
2237         uncore_perf_event_update(box, event);
2238 }
2239
2240 /*
2241  * validation ensures the group can be loaded onto the
2242  * PMU if it were the only group available.
2243  */
2244 static int uncore_validate_group(struct intel_uncore_pmu *pmu,
2245                                 struct perf_event *event)
2246 {
2247         struct perf_event *leader = event->group_leader;
2248         struct intel_uncore_box *fake_box;
2249         int ret = -EINVAL, n;
2250
2251         fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
2252         if (!fake_box)
2253                 return -ENOMEM;
2254
2255         fake_box->pmu = pmu;
2256         /*
2257          * the event is not yet connected with its
2258          * siblings, therefore we must first collect the
2259          * existing siblings, then add the new event
2260          * before we can simulate the scheduling
2261          */
2262         n = uncore_collect_events(fake_box, leader, true);
2263         if (n < 0)
2264                 goto out;
2265
2266         fake_box->n_events = n;
2267         n = uncore_collect_events(fake_box, event, false);
2268         if (n < 0)
2269                 goto out;
2270
2271         fake_box->n_events = n;
2272
2273         ret = uncore_assign_events(fake_box, NULL, n);
2274 out:
2275         kfree(fake_box);
2276         return ret;
2277 }
2278
2279 int uncore_pmu_event_init(struct perf_event *event)
2280 {
2281         struct intel_uncore_pmu *pmu;
2282         struct intel_uncore_box *box;
2283         struct hw_perf_event *hwc = &event->hw;
2284         int ret;
2285
2286         if (event->attr.type != event->pmu->type)
2287                 return -ENOENT;
2288
2289         pmu = uncore_event_to_pmu(event);
2290         /* no device found for this pmu */
2291         if (pmu->func_id < 0)
2292                 return -ENOENT;
2293
2294         /*
2295          * The uncore PMU measures at all privilege levels all the time,
2296          * so it doesn't make sense to specify any exclude bits.
2297          */
2298         if (event->attr.exclude_user || event->attr.exclude_kernel ||
2299                         event->attr.exclude_hv || event->attr.exclude_idle)
2300                 return -EINVAL;
2301
2302         /* Sampling not supported yet */
2303         if (hwc->sample_period)
2304                 return -EINVAL;
2305
2306         /*
2307          * Place all uncore events for a particular physical package
2308          * onto a single cpu
2309          */
2310         if (event->cpu < 0)
2311                 return -EINVAL;
2312         box = uncore_pmu_to_box(pmu, event->cpu);
2313         if (!box || box->cpu < 0)
2314                 return -EINVAL;
2315         event->cpu = box->cpu;
2316
2317         event->hw.idx = -1;
2318         event->hw.last_tag = ~0ULL;
2319         event->hw.extra_reg.idx = EXTRA_REG_NONE;
2320         event->hw.branch_reg.idx = EXTRA_REG_NONE;
2321
2322         if (event->attr.config == UNCORE_FIXED_EVENT) {
2323                 /* no fixed counter */
2324                 if (!pmu->type->fixed_ctl)
2325                         return -EINVAL;
2326                 /*
2327                  * if there is only one fixed counter, only the first pmu
2328                  * can access the fixed counter
2329                  */
2330                 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
2331                         return -EINVAL;
2332                 hwc->config = ~0ULL;
2333         } else {
2334                 hwc->config = event->attr.config & pmu->type->event_mask;
2335                 if (pmu->type->ops->hw_config) {
2336                         ret = pmu->type->ops->hw_config(box, event);
2337                         if (ret)
2338                                 return ret;
2339                 }
2340         }
2341
2342         if (event->group_leader != event)
2343                 ret = uncore_validate_group(pmu, event);
2344         else
2345                 ret = 0;
2346
2347         return ret;
2348 }
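
     /*
      * Usage sketch (editor's note): from user space the dynamic PMU type
      * is read from sysfs and passed to perf_event_open(), roughly:
      *
      *   attr.type   = <contents of /sys/bus/event_source/devices/
      *                  uncore_mbox_0/type>;
      *   attr.config = <encoded according to the "format" directory>;
      *   fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
      *
      * with pid == -1, a valid cpu, and no sampling period, matching the
      * checks above.
      */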
2349
2350 static ssize_t uncore_get_attr_cpumask(struct device *dev,
2351                                 struct device_attribute *attr, char *buf)
2352 {
2353         int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
2354
2355         buf[n++] = '\n';
2356         buf[n] = '\0';
2357         return n;
2358 }
2359
2360 static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
2361
2362 static struct attribute *uncore_pmu_attrs[] = {
2363         &dev_attr_cpumask.attr,
2364         NULL,
2365 };
2366
2367 static struct attribute_group uncore_pmu_attr_group = {
2368         .attrs = uncore_pmu_attrs,
2369 };
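
     /*
      * Editor's note: this group is wired up as type->pmu_group in
      * uncore_type_init() below, so every uncore PMU exposes a "cpumask"
      * file, e.g. /sys/bus/event_source/devices/uncore_mbox_0/cpumask,
      * telling tools which cpu collects the events of each package.
      */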
2370
2371 static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
2372 {
2373         int ret;
2374
2375         pmu->pmu = (struct pmu) {
2376                 .attr_groups    = pmu->type->attr_groups,
2377                 .task_ctx_nr    = perf_invalid_context,
2378                 .event_init     = uncore_pmu_event_init,
2379                 .add            = uncore_pmu_event_add,
2380                 .del            = uncore_pmu_event_del,
2381                 .start          = uncore_pmu_event_start,
2382                 .stop           = uncore_pmu_event_stop,
2383                 .read           = uncore_pmu_event_read,
2384         };
2385
2386         if (pmu->type->num_boxes == 1) {
2387                 if (strlen(pmu->type->name) > 0)
2388                         sprintf(pmu->name, "uncore_%s", pmu->type->name);
2389                 else
2390                         sprintf(pmu->name, "uncore");
2391         } else {
2392                 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
2393                         pmu->pmu_idx);
2394         }
2395
2396         ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
2397         return ret;
2398 }
2399
2400 static void __init uncore_type_exit(struct intel_uncore_type *type)
2401 {
2402         int i;
2403
2404         for (i = 0; i < type->num_boxes; i++)
2405                 free_percpu(type->pmus[i].box);
2406         kfree(type->pmus);
2407         type->pmus = NULL;
2408         kfree(type->events_group);
2409         type->events_group = NULL;
2410 }
2411
2412 static void __init uncore_types_exit(struct intel_uncore_type **types)
2413 {
2414         int i;
2415         for (i = 0; types[i]; i++)
2416                 uncore_type_exit(types[i]);
2417 }
2418
2419 static int __init uncore_type_init(struct intel_uncore_type *type)
2420 {
2421         struct intel_uncore_pmu *pmus;
2422         struct attribute_group *events_group;
2423         struct attribute **attrs;
2424         int i, j;
2425
2426         pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
2427         if (!pmus)
2428                 return -ENOMEM;
2429
2430         type->unconstrainted = (struct event_constraint)
2431                 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
2432                                 0, type->num_counters, 0);
2433
2434         for (i = 0; i < type->num_boxes; i++) {
2435                 pmus[i].func_id = -1;
2436                 pmus[i].pmu_idx = i;
2437                 pmus[i].type = type;
2438                 INIT_LIST_HEAD(&pmus[i].box_list);
2439                 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
2440                 if (!pmus[i].box)
2441                         goto fail;
2442         }
2443
2444         if (type->event_descs) {
2445                 i = 0;
2446                 while (type->event_descs[i].attr.attr.name)
2447                         i++;
2448
2449                 events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
2450                                         sizeof(*events_group), GFP_KERNEL);
2451                 if (!events_group)
2452                         goto fail;
2453
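                     /*
                      * single allocation: the attribute_group itself is
                      * immediately followed by its attribute pointer
                      * array (kzalloc'd, hence already NULL-terminated)
                      */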
2454                 attrs = (struct attribute **)(events_group + 1);
2455                 events_group->name = "events";
2456                 events_group->attrs = attrs;
2457
2458                 for (j = 0; j < i; j++)
2459                         attrs[j] = &type->event_descs[j].attr.attr;
2460
2461                 type->events_group = events_group;
2462         }
2463
2464         type->pmu_group = &uncore_pmu_attr_group;
2465         type->pmus = pmus;
2466         return 0;
2467 fail:
2468         uncore_type_exit(type);
2469         return -ENOMEM;
2470 }
2471
2472 static int __init uncore_types_init(struct intel_uncore_type **types)
2473 {
2474         int i, ret;
2475
2476         for (i = 0; types[i]; i++) {
2477                 ret = uncore_type_init(types[i]);
2478                 if (ret)
2479                         goto fail;
2480         }
2481         return 0;
2482 fail:
2483         while (--i >= 0)
2484                 uncore_type_exit(types[i]);
2485         return ret;
2486 }
2487
2488 static struct pci_driver *uncore_pci_driver;
2489 static bool pcidrv_registered;
2490
2491 /*
2492  * add a pci uncore device
2493  */
2494 static int __devinit uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev)
2495 {
2496         struct intel_uncore_pmu *pmu;
2497         struct intel_uncore_box *box;
2498         int i, phys_id;
2499
2500         phys_id = pcibus_to_physid[pdev->bus->number];
2501         if (phys_id < 0)
2502                 return -ENODEV;
2503
2504         box = uncore_alloc_box(type, 0);
2505         if (!box)
2506                 return -ENOMEM;
2507
2508         /*
2509          * for a performance monitoring unit with multiple boxes,
2510          * each box has a different function id.
2511          */
2512         for (i = 0; i < type->num_boxes; i++) {
2513                 pmu = &type->pmus[i];
2514                 if (pmu->func_id == pdev->devfn)
2515                         break;
2516                 if (pmu->func_id < 0) {
2517                         pmu->func_id = pdev->devfn;
2518                         break;
2519                 }
2520                 pmu = NULL;
2521         }
2522
2523         if (!pmu) {
2524                 kfree(box);
2525                 return -EINVAL;
2526         }
2527
2528         box->phys_id = phys_id;
2529         box->pci_dev = pdev;
2530         box->pmu = pmu;
2531         uncore_box_init(box);
2532         pci_set_drvdata(pdev, box);
2533
2534         raw_spin_lock(&uncore_box_lock);
2535         list_add_tail(&box->list, &pmu->box_list);
2536         raw_spin_unlock(&uncore_box_lock);
2537
2538         return 0;
2539 }
2540
2541 static void uncore_pci_remove(struct pci_dev *pdev)
2542 {
2543         struct intel_uncore_box *box = pci_get_drvdata(pdev);
2544         struct intel_uncore_pmu *pmu = box->pmu;
2545         int cpu, phys_id = pcibus_to_physid[pdev->bus->number];
2546
2547         if (WARN_ON_ONCE(phys_id != box->phys_id))
2548                 return;
2549
2550         raw_spin_lock(&uncore_box_lock);
2551         list_del(&box->list);
2552         raw_spin_unlock(&uncore_box_lock);
2553
2554         for_each_possible_cpu(cpu) {
2555                 if (*per_cpu_ptr(pmu->box, cpu) == box) {
2556                         *per_cpu_ptr(pmu->box, cpu) = NULL;
2557                         atomic_dec(&box->refcnt);
2558                 }
2559         }
2560
2561         WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
2562         kfree(box);
2563 }
2564
2565 static int __devinit uncore_pci_probe(struct pci_dev *pdev,
2566                                 const struct pci_device_id *id)
2567 {
2568         struct intel_uncore_type *type;
2569
2570         type = (struct intel_uncore_type *)id->driver_data;
2571
2572         return uncore_pci_add(type, pdev);
2573 }
2574
2575 static int __init uncore_pci_init(void)
2576 {
2577         int ret;
2578
2579         switch (boot_cpu_data.x86_model) {
2580         case 45: /* Sandy Bridge-EP */
2581                 pci_uncores = snbep_pci_uncores;
2582                 uncore_pci_driver = &snbep_uncore_pci_driver;
2583                 snbep_pci2phy_map_init();
2584                 break;
2585         default:
2586                 return 0;
2587         }
2588
2589         ret = uncore_types_init(pci_uncores);
2590         if (ret)
2591                 return ret;
2592
2593         uncore_pci_driver->probe = uncore_pci_probe;
2594         uncore_pci_driver->remove = uncore_pci_remove;
2595
2596         ret = pci_register_driver(uncore_pci_driver);
2597         if (ret == 0)
2598                 pcidrv_registered = true;
2599         else
2600                 uncore_types_exit(pci_uncores);
2601
2602         return ret;
2603 }
2604
2605 static void __init uncore_pci_exit(void)
2606 {
2607         if (pcidrv_registered) {
2608                 pcidrv_registered = false;
2609                 pci_unregister_driver(uncore_pci_driver);
2610                 uncore_types_exit(pci_uncores);
2611         }
2612 }
2613
2614 static void __cpuinit uncore_cpu_dying(int cpu)
2615 {
2616         struct intel_uncore_type *type;
2617         struct intel_uncore_pmu *pmu;
2618         struct intel_uncore_box *box;
2619         int i, j;
2620
2621         for (i = 0; msr_uncores[i]; i++) {
2622                 type = msr_uncores[i];
2623                 for (j = 0; j < type->num_boxes; j++) {
2624                         pmu = &type->pmus[j];
2625                         box = *per_cpu_ptr(pmu->box, cpu);
2626                         *per_cpu_ptr(pmu->box, cpu) = NULL;
2627                         if (box && atomic_dec_and_test(&box->refcnt))
2628                                 kfree(box);
2629                 }
2630         }
2631 }
2632
2633 static int __cpuinit uncore_cpu_starting(int cpu)
2634 {
2635         struct intel_uncore_type *type;
2636         struct intel_uncore_pmu *pmu;
2637         struct intel_uncore_box *box, *exist;
2638         int i, j, k, phys_id;
2639
2640         phys_id = topology_physical_package_id(cpu);
2641
2642         for (i = 0; msr_uncores[i]; i++) {
2643                 type = msr_uncores[i];
2644                 for (j = 0; j < type->num_boxes; j++) {
2645                         pmu = &type->pmus[j];
2646                         box = *per_cpu_ptr(pmu->box, cpu);
2647                         /* called by uncore_cpu_init? */
2648                         if (box && box->phys_id >= 0) {
2649                                 uncore_box_init(box);
2650                                 continue;
2651                         }
2652
2653                         for_each_online_cpu(k) {
2654                                 exist = *per_cpu_ptr(pmu->box, k);
2655                                 if (exist && exist->phys_id == phys_id) {
2656                                         atomic_inc(&exist->refcnt);
2657                                         *per_cpu_ptr(pmu->box, cpu) = exist;
2658                                         kfree(box);
2659                                         box = NULL;
2660                                         break;
2661                                 }
2662                         }
2663
2664                         if (box) {
2665                                 box->phys_id = phys_id;
2666                                 uncore_box_init(box);
2667                         }
2668                 }
2669         }
2670         return 0;
2671 }
2672
2673 static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
2674 {
2675         struct intel_uncore_type *type;
2676         struct intel_uncore_pmu *pmu;
2677         struct intel_uncore_box *box;
2678         int i, j;
2679
2680         for (i = 0; msr_uncores[i]; i++) {
2681                 type = msr_uncores[i];
2682                 for (j = 0; j < type->num_boxes; j++) {
2683                         pmu = &type->pmus[j];
2684                         if (pmu->func_id < 0)
2685                                 pmu->func_id = j;
2686
2687                         box = uncore_alloc_box(type, cpu);
2688                         if (!box)
2689                                 return -ENOMEM;
2690
2691                         box->pmu = pmu;
2692                         box->phys_id = phys_id;
2693                         *per_cpu_ptr(pmu->box, cpu) = box;
2694                 }
2695         }
2696         return 0;
2697 }
2698
2699 static void __cpuinit
2700 uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
2701 {
2702         struct intel_uncore_type *type;
2703         struct intel_uncore_pmu *pmu;
2704         struct intel_uncore_box *box;
2705         int i, j;
2706
2707         for (i = 0; uncores[i]; i++) {
2708                 type = uncores[i];
2709                 for (j = 0; j < type->num_boxes; j++) {
2710                         pmu = &type->pmus[j];
2711                         if (old_cpu < 0)
2712                                 box = uncore_pmu_to_box(pmu, new_cpu);
2713                         else
2714                                 box = uncore_pmu_to_box(pmu, old_cpu);
2715                         if (!box)
2716                                 continue;
2717
2718                         if (old_cpu < 0) {
2719                                 WARN_ON_ONCE(box->cpu != -1);
2720                                 box->cpu = new_cpu;
2721                                 continue;
2722                         }
2723
2724                         WARN_ON_ONCE(box->cpu != old_cpu);
2725                         if (new_cpu >= 0) {
2726                                 uncore_pmu_cancel_hrtimer(box);
2727                                 perf_pmu_migrate_context(&pmu->pmu,
2728                                                 old_cpu, new_cpu);
2729                                 box->cpu = new_cpu;
2730                         } else {
2731                                 box->cpu = -1;
2732                         }
2733                 }
2734         }
2735 }
2736
2737 static void __cpuinit uncore_event_exit_cpu(int cpu)
2738 {
2739         int i, phys_id, target;
2740
2741         /* if the exiting cpu is used for collecting uncore events */
2742         if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
2743                 return;
2744
2745         /* find a new cpu to collect uncore events */
2746         phys_id = topology_physical_package_id(cpu);
2747         target = -1;
2748         for_each_online_cpu(i) {
2749                 if (i == cpu)
2750                         continue;
2751                 if (phys_id == topology_physical_package_id(i)) {
2752                         target = i;
2753                         break;
2754                 }
2755         }
2756
2757         /* migrate uncore events to the new cpu */
2758         if (target >= 0)
2759                 cpumask_set_cpu(target, &uncore_cpu_mask);
2760
2761         uncore_change_context(msr_uncores, cpu, target);
2762         uncore_change_context(pci_uncores, cpu, target);
2763 }
2764
2765 static void __cpuinit uncore_event_init_cpu(int cpu)
2766 {
2767         int i, phys_id;
2768
2769         phys_id = topology_physical_package_id(cpu);
2770         for_each_cpu(i, &uncore_cpu_mask) {
2771                 if (phys_id == topology_physical_package_id(i))
2772                         return;
2773         }
2774
2775         cpumask_set_cpu(cpu, &uncore_cpu_mask);
2776
2777         uncore_change_context(msr_uncores, -1, cpu);
2778         uncore_change_context(pci_uncores, -1, cpu);
2779 }
2780
2781 static int __cpuinit
2782 uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
2783 {
2784         unsigned int cpu = (long)hcpu;
2785
2786         /* allocate/free data structure for uncore box */
2787         switch (action & ~CPU_TASKS_FROZEN) {
2788         case CPU_UP_PREPARE:
2789                 uncore_cpu_prepare(cpu, -1);
2790                 break;
2791         case CPU_STARTING:
2792                 uncore_cpu_starting(cpu);
2793                 break;
2794         case CPU_UP_CANCELED:
2795         case CPU_DYING:
2796                 uncore_cpu_dying(cpu);
2797                 break;
2798         default:
2799                 break;
2800         }
2801
2802         /* select the cpu that collects uncore events */
2803         switch (action & ~CPU_TASKS_FROZEN) {
2804         case CPU_DOWN_FAILED:
2805         case CPU_STARTING:
2806                 uncore_event_init_cpu(cpu);
2807                 break;
2808         case CPU_DOWN_PREPARE:
2809                 uncore_event_exit_cpu(cpu);
2810                 break;
2811         default:
2812                 break;
2813         }
2814
2815         return NOTIFY_OK;
2816 }
2817
2818 static struct notifier_block uncore_cpu_nb __cpuinitdata = {
2819         .notifier_call  = uncore_cpu_notifier,
2820         /*
2821          * to migrate uncore events, our notifier should be executed
2822          * before perf core's notifier.
2823          */
2824         .priority       = CPU_PRI_PERF + 1,
2825 };
2826
2827 static void __init uncore_cpu_setup(void *dummy)
2828 {
2829         uncore_cpu_starting(smp_processor_id());
2830 }
2831
2832 static int __init uncore_cpu_init(void)
2833 {
2834         int ret, cpu, max_cores;
2835
2836         max_cores = boot_cpu_data.x86_max_cores;
2837         switch (boot_cpu_data.x86_model) {
2838         case 26: /* Nehalem */
2839         case 30:
2840         case 37: /* Westmere */
2841         case 44:
2842                 msr_uncores = nhm_msr_uncores;
2843                 break;
2844         case 42: /* Sandy Bridge */
2845                 if (snb_uncore_cbox.num_boxes > max_cores)
2846                         snb_uncore_cbox.num_boxes = max_cores;
2847                 msr_uncores = snb_msr_uncores;
2848                 break;
2849         case 45: /* Sandy Bridge-EP */
2850                 if (snbep_uncore_cbox.num_boxes > max_cores)
2851                         snbep_uncore_cbox.num_boxes = max_cores;
2852                 msr_uncores = snbep_msr_uncores;
2853                 break;
2854         case 46: /* Nehalem-EX */
2855                 uncore_nhmex = true;
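                     /* fall through */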
2856         case 47: /* Westmere-EX aka. Xeon E7 */
2857                 if (!uncore_nhmex)
2858                         nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
2859                 if (nhmex_uncore_cbox.num_boxes > max_cores)
2860                         nhmex_uncore_cbox.num_boxes = max_cores;
2861                 msr_uncores = nhmex_msr_uncores;
2862                 break;
2863         default:
2864                 return 0;
2865         }
2866
2867         ret = uncore_types_init(msr_uncores);
2868         if (ret)
2869                 return ret;
2870
2871         get_online_cpus();
2872
2873         for_each_online_cpu(cpu) {
2874                 int i, phys_id = topology_physical_package_id(cpu);
2875
2876                 for_each_cpu(i, &uncore_cpu_mask) {
2877                         if (phys_id == topology_physical_package_id(i)) {
2878                                 phys_id = -1;
2879                                 break;
2880                         }
2881                 }
2882                 if (phys_id < 0)
2883                         continue;
2884
2885                 uncore_cpu_prepare(cpu, phys_id);
2886                 uncore_event_init_cpu(cpu);
2887         }
2888         on_each_cpu(uncore_cpu_setup, NULL, 1);
2889
2890         register_cpu_notifier(&uncore_cpu_nb);
2891
2892         put_online_cpus();
2893
2894         return 0;
2895 }
2896
2897 static int __init uncore_pmus_register(void)
2898 {
2899         struct intel_uncore_pmu *pmu;
2900         struct intel_uncore_type *type;
2901         int i, j;
2902
2903         for (i = 0; msr_uncores[i]; i++) {
2904                 type = msr_uncores[i];
2905                 for (j = 0; j < type->num_boxes; j++) {
2906                         pmu = &type->pmus[j];
2907                         uncore_pmu_register(pmu);
2908                 }
2909         }
2910
2911         for (i = 0; pci_uncores[i]; i++) {
2912                 type = pci_uncores[i];
2913                 for (j = 0; j < type->num_boxes; j++) {
2914                         pmu = &type->pmus[j];
2915                         uncore_pmu_register(pmu);
2916                 }
2917         }
2918
2919         return 0;
2920 }
2921
2922 static int __init intel_uncore_init(void)
2923 {
2924         int ret;
2925
2926         if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2927                 return -ENODEV;
2928
2929         ret = uncore_pci_init();
2930         if (ret)
2931                 goto fail;
2932         ret = uncore_cpu_init();
2933         if (ret) {
2934                 uncore_pci_exit();
2935                 goto fail;
2936         }
2937
2938         uncore_pmus_register();
2939         return 0;
2940 fail:
2941         return ret;
2942 }
2943 device_initcall(intel_uncore_init);