/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC 0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC 0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00
#define SNB_UNC_CTL_EDGE_DET (1 << 18)
#define SNB_UNC_CTL_EN (1 << 22)
#define SNB_UNC_CTL_INVERT (1 << 23)
#define SNB_UNC_CTL_CMASK_MASK 0x1f000000
#define NHM_UNC_CTL_CMASK_MASK 0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0)

#define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
                                SNB_UNC_CTL_UMASK_MASK | \
                                SNB_UNC_CTL_EDGE_DET | \
                                SNB_UNC_CTL_INVERT | \
                                SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
                                SNB_UNC_CTL_UMASK_MASK | \
                                SNB_UNC_CTL_EDGE_DET | \
                                SNB_UNC_CTL_INVERT | \
                                NHM_UNC_CTL_CMASK_MASK)

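/*
 * NHM differs from SNB only in the counter-mask field: SNB uses a
 * 5-bit cmask in bits 24-28, NHM a full 8-bit cmask in bits 24-31
 * (see the cmask5/cmask8 format attributes below).
 */
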
/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL 0x391
#define SNB_UNC_FIXED_CTR_CTRL 0x394
#define SNB_UNC_FIXED_CTR 0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN (1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0 0x700
#define SNB_UNC_CBO_0_PER_CTR0 0x706
#define SNB_UNC_CBO_MSR_OFFSET 0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0 0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0 0x3b2
#define SNB_UNC_ARB_MSR_OFFSET 0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL 0x391
#define NHM_UNC_FIXED_CTR 0x394
#define NHM_UNC_FIXED_CTR_CTRL 0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0 0x3c0
#define NHM_UNC_UNCORE_PMC0 0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL 0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL ((1 << 5) - 1)

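/*
 * SKL moved the uncore global control MSR to 0xe01 and widened the
 * per-core enable field from four bits (SNB) to five.
 */
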
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
        else
                wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0) {
                wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
                        SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
        }
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
                SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0)
                wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
        { /* end: all zeroes */ },
};

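/*
 * The clockticks alias above is just the raw encoding
 * event=0xff,umask=0x00, so with the perf tool the two forms below
 * are equivalent (PMU name illustrative; cbox PMUs are typically
 * exposed as uncore_cbox_<n>):
 *
 *   perf stat -a -e uncore_cbox_0/clockticks/ -- sleep 1
 *   perf stat -a -e 'uncore_cbox_0/event=0xff,umask=0x00/' -- sleep 1
 */
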
static struct attribute *snb_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask5.attr,
        NULL,
};

static struct attribute_group snb_uncore_format_group = {
        .name = "format",
        .attrs = snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
        .init_box = snb_uncore_msr_init_box,
        .enable_box = snb_uncore_msr_enable_box,
        .exit_box = snb_uncore_msr_exit_box,
        .disable_event = snb_uncore_msr_disable_event,
        .enable_event = snb_uncore_msr_enable_event,
        .read_counter = uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
        EVENT_CONSTRAINT_END
};

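/* Counter mask 0x1 pins ARB events 0x80 and 0x83 to counter 0. */
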
static struct intel_uncore_type snb_uncore_cbox = {
        .name = "cbox",
        .num_counters = 2,
        .num_boxes = 4,
        .perf_ctr_bits = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
        .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
        .fixed_ctr = SNB_UNC_FIXED_CTR,
        .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed = 1,
        .event_mask = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
        .ops = &snb_uncore_msr_ops,
        .format_group = &snb_uncore_format_group,
        .event_descs = snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
        .name = "arb",
        .num_counters = 2,
        .num_boxes = 1,
        .perf_ctr_bits = 44,
        .perf_ctr = SNB_UNC_ARB_PER_CTR0,
        .event_ctl = SNB_UNC_ARB_PERFEVTSEL0,
        .event_mask = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset = SNB_UNC_ARB_MSR_OFFSET,
        .constraints = snb_uncore_arb_constraints,
        .ops = &snb_uncore_msr_ops,
        .format_group = &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
        &snb_uncore_cbox,
        &snb_uncore_arb,
        NULL,
};

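/*
 * There is one Cbo slice per core, so clamp the number of advertised
 * Cbo boxes to the actual core count on this part.
 */
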
void snb_uncore_cpu_init(void)
{
        uncore_msr_uncores = snb_msr_uncores;
        if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0) {
                wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
                        SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
        }
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
                SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0)
                wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
        .init_box = skl_uncore_msr_init_box,
        .enable_box = skl_uncore_msr_enable_box,
        .exit_box = skl_uncore_msr_exit_box,
        .disable_event = snb_uncore_msr_disable_event,
        .enable_event = snb_uncore_msr_enable_event,
        .read_counter = uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
        .name = "cbox",
        .num_counters = 4,
        .num_boxes = 5,
        .perf_ctr_bits = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
        .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
        .fixed_ctr = SNB_UNC_FIXED_CTR,
        .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed = 1,
        .event_mask = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
        .ops = &skl_uncore_msr_ops,
        .format_group = &snb_uncore_format_group,
        .event_descs = snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
        &skl_uncore_cbox,
        &snb_uncore_arb,
        NULL,
};

void skl_uncore_cpu_init(void)
{
        uncore_msr_uncores = skl_msr_uncores;
        if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
        snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

enum {
        SNB_PCI_UNCORE_IMC,
};

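/*
 * The client IMC exposes free-running 32-bit MMIO counters that count
 * 64-byte cache lines read/written. The 6.103515625e-5 scale below
 * converts lines to MiB: 64 / 2^20 = 6.103515625e-5.
 */
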
static struct uncore_event_desc snb_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"),
        INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

        INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
        INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

        { /* end: all zeroes */ },
};

#define SNB_UNCORE_PCI_IMC_EVENT_MASK 0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET 0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE 0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS 0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE 0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES 0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE

static struct attribute *snb_uncore_imc_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
        .name = "format",
        .attrs = snb_uncore_imc_formats_attr,
};

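/*
 * The IMC counter MMIO region is discovered from the BAR at PCI config
 * offset 0x48; the page-aligned base is ioremap()ed and the counters
 * are then read directly from it.
 */
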
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
        resource_size_t addr;
        u32 pci_dword;

        pci_read_config_dword(pdev, where, &pci_dword);
        addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
        pci_read_config_dword(pdev, where + 4, &pci_dword);
        addr |= ((resource_size_t)pci_dword << 32);
#endif

        addr &= ~(PAGE_SIZE - 1);

        box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
        box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
        iounmap(box->io_addr);
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}

/*
 * Custom event_init() function because we define our own fixed, free
 * running counters, so we do not want to conflict with generic uncore
 * logic. It also simplifies processing.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        struct hw_perf_event *hwc = &event->hw;
        u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
        int idx, base;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        pmu = uncore_event_to_pmu(event);
        /* no device found for this pmu */
        if (pmu->func_id < 0)
                return -ENOENT;

        /* Sampling not supported yet */
        if (hwc->sample_period)
                return -EINVAL;

        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
            event->attr.exclude_hv     ||
            event->attr.exclude_idle   ||
            event->attr.exclude_host   ||
            event->attr.exclude_guest  ||
            event->attr.sample_period) /* no sampling */
                return -EINVAL;

        /*
         * Place all uncore events for a particular physical package
         * onto a single cpu
         */
        if (event->cpu < 0)
                return -EINVAL;

        /* check only supported bits are set */
        if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
                return -EINVAL;

        box = uncore_pmu_to_box(pmu, event->cpu);
        if (!box || box->cpu < 0)
                return -EINVAL;

        event->cpu = box->cpu;
        event->pmu_private = box;

        event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

        event->hw.idx = -1;
        event->hw.last_tag = ~0ULL;
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;
        /*
         * check event is known (whitelist, determines counter)
         */
        switch (cfg) {
        case SNB_UNCORE_PCI_IMC_DATA_READS:
                base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
                idx = UNCORE_PMC_IDX_FIXED;
                break;
        case SNB_UNCORE_PCI_IMC_DATA_WRITES:
                base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
                idx = UNCORE_PMC_IDX_FIXED + 1;
                break;
        default:
                return -EINVAL;
        }

        /* must be done before validate_group */
        event->hw.event_base = base;
        event->hw.config = cfg;
        event->hw.idx = idx;

        /* no group validation needed, we have free running counters */

        return 0;
}

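/* Nothing to configure in hardware; the IMC counters are fixed-purpose. */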
static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        return 0;
}

static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        u64 count;

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        event->hw.state = 0;
        box->n_active++;

        list_add_tail(&event->active_entry, &box->active_list);

        count = snb_uncore_imc_read_counter(box, event);
        local64_set(&event->hw.prev_count, count);

        if (box->n_active == 1)
                uncore_pmu_start_hrtimer(box);
}

static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;

        if (!(hwc->state & PERF_HES_STOPPED)) {
                box->n_active--;

                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;

                list_del(&event->active_entry);

                if (box->n_active == 0)
                        uncore_pmu_cancel_hrtimer(box);
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                uncore_perf_event_update(box, event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;

        if (!box)
                return -ENODEV;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        snb_uncore_imc_event_start(event, 0);

        box->n_events++;

        return 0;
}

static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        int i;

        snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);

        for (i = 0; i < box->n_events; i++) {
                if (event == box->event_list[i]) {
                        --box->n_events;
                        break;
                }
        }
}

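/*
 * These client parts are single-package, so the bus of the first
 * matching IMC device is simply mapped to physical package id 0.
 */
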
int snb_pci2phy_map_init(int devid)
{
        struct pci_dev *dev = NULL;
        struct pci2phy_map *map;
        int bus, segment;

        dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
        if (!dev)
                return -ENOTTY;

        bus = dev->bus->number;
        segment = pci_domain_nr(dev->bus);

        raw_spin_lock(&pci2phy_map_lock);
        map = __find_pci2phy_map(segment);
        if (!map) {
                raw_spin_unlock(&pci2phy_map_lock);
                pci_dev_put(dev);
                return -ENOMEM;
        }
        map->pbus_to_physid[bus] = 0;
        raw_spin_unlock(&pci2phy_map_lock);

        pci_dev_put(dev);

        return 0;
}

static struct pmu snb_uncore_imc_pmu = {
        .task_ctx_nr = perf_invalid_context,
        .event_init = snb_uncore_imc_event_init,
        .add = snb_uncore_imc_event_add,
        .del = snb_uncore_imc_event_del,
        .start = snb_uncore_imc_event_start,
        .stop = snb_uncore_imc_event_stop,
        .read = uncore_pmu_event_read,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
        .init_box = snb_uncore_imc_init_box,
        .exit_box = snb_uncore_imc_exit_box,
        .enable_box = snb_uncore_imc_enable_box,
        .disable_box = snb_uncore_imc_disable_box,
        .disable_event = snb_uncore_imc_disable_event,
        .enable_event = snb_uncore_imc_enable_event,
        .hw_config = snb_uncore_imc_hw_config,
        .read_counter = snb_uncore_imc_read_counter,
};

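/*
 * The IMC supplies its own struct pmu (.pmu below) so that the custom
 * event_init()/add()/del() above are used instead of the generic
 * uncore event scheduling, which the free-running counters do not need.
 */
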
static struct intel_uncore_type snb_uncore_imc = {
        .name = "imc",
        .num_counters = 2,
        .num_boxes = 1,
        .fixed_ctr_bits = 32,
        .fixed_ctr = SNB_UNCORE_PCI_IMC_CTR_BASE,
        .event_descs = snb_uncore_imc_events,
        .format_group = &snb_uncore_imc_format_group,
        .perf_ctr = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
        .event_mask = SNB_UNCORE_PCI_IMC_EVENT_MASK,
        .ops = &snb_uncore_imc_ops,
        .pmu = &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
        [SNB_PCI_UNCORE_IMC] = &snb_uncore_imc,
        NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
        .name = "snb_uncore",
        .id_table = snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
        .name = "ivb_uncore",
        .id_table = ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
        .name = "hsw_uncore",
        .id_table = hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
        .name = "bdw_uncore",
        .id_table = bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
        .name = "skl_uncore",
        .id_table = skl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
        __u32 pci_id;
        struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
        { .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
        IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
        IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
        IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
        IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
        IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
        IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
        IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
        IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
        IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */
        IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
        IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
        IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
        {  /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
        for (x = (t); (x)->pci_id; x++)

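/*
 * Walk the IMC device table, register the pci2phy mapping for the
 * first IMC PCI device that is present, and hand back its driver.
 */
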
static struct pci_driver *imc_uncore_find_dev(void)
{
        const struct imc_uncore_pci_dev *p;
        int ret;

        for_each_imc_pci_id(p, desktop_imc_pci_ids) {
                ret = snb_pci2phy_map_init(p->pci_id);
                if (ret == 0)
                        return p->driver;
        }
        return NULL;
}

static int imc_uncore_pci_init(void)
{
        struct pci_driver *imc_drv = imc_uncore_find_dev();

        if (!imc_drv)
                return -ENODEV;

        uncore_pci_uncores = snb_pci_uncores;
        uncore_pci_driver = imc_drv;

        return 0;
}

int snb_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

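/*
 * The fixed clockticks counter has its own enable bit
 * (NHM_UNC_FIXED_CTR_CTL_EN); general counters use the EN bit in
 * their event-select MSR.
 */
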
static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
        else
                wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask8.attr,
        NULL,
};

static struct attribute_group nhm_uncore_format_group = {
        .name = "format",
        .attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
        INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
        { /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
        .disable_box = nhm_uncore_msr_disable_box,
        .enable_box = nhm_uncore_msr_enable_box,
        .disable_event = snb_uncore_msr_disable_event,
        .enable_event = nhm_uncore_msr_enable_event,
        .read_counter = uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
        .name = "",
        .num_counters = 8,
        .num_boxes = 1,
        .perf_ctr_bits = 48,
        .fixed_ctr_bits = 48,
        .event_ctl = NHM_UNC_PERFEVTSEL0,
        .perf_ctr = NHM_UNC_UNCORE_PMC0,
        .fixed_ctr = NHM_UNC_FIXED_CTR,
        .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
        .event_mask = NHM_UNC_RAW_EVENT_MASK,
        .event_descs = nhm_uncore_events,
        .ops = &nhm_uncore_msr_ops,
        .format_group = &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
        &nhm_uncore,
        NULL,
};

void nhm_uncore_cpu_init(void)
{
        uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */