#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"
static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_packages;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");
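/*
 * On multi-socket systems the uncore PCI devices of a package appear
 * on package-specific buses. The pci2phy map records, per PCI segment,
 * which bus number belongs to which physical package, so that a probed
 * device can be routed to the right per-package box; see
 * uncore_pcibus_to_physid() and __find_pci2phy_map() below.
 */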
static int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}
static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}
struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}
ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	return pmu->boxes[topology_logical_package_id(cpu)];
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}
/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}
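/*
 * uncore_get_constraint() and uncore_put_constraint() pair up: the
 * shared register is reference counted and events may share it only
 * while they program identical match/mask values. On a mismatch the
 * empty constraint is returned, which makes the scheduler reject the
 * event on this box.
 */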
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * This also takes care of events which do not use an extra shared
	 * reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}
static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}
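/*
 * The shift arithmetic in uncore_perf_event_update() left-aligns both
 * counter snapshots in 64 bits before subtracting, so a counter that
 * wrapped in its narrower hardware width still yields the correct
 * unsigned delta once shifted back down.
 */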
/*
 * The overflow interrupt is unavailable for SandyBridge-EP, is broken
 * for SandyBridge. So we use hrtimer to periodically poll the counter
 * to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->pci_phys_id = -1;
	box->pkgid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}
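/*
 * uncore_alloc_box() sizes the allocation so the box and its variable
 * number of shared registers live in one chunk; box->shared_regs[] is
 * the flexible array at the end of struct intel_uncore_box.
 */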
/*
 * Using the uncore_pmu_event_init() pmu::event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_uncore_event(struct perf_event *event)
{
	return event->pmu->event_init == uncore_pmu_event_init;
}
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_uncore_event(leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_uncore_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}
static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}
static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}

	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}

	return ret ? -EINVAL : 0;
}
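/*
 * uncore_assign_events() mirrors the core x86 event scheduler: the
 * fastpath above tries to keep every event on the counter it already
 * occupies, and only when some constraint no longer holds does it fall
 * back to the generic perf_assign_events() solver.
 */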
static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}
static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}
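/*
 * Note the two passes in uncore_pmu_event_add(): events whose counter
 * assignment changed are first stopped with PERF_EF_UPDATE (draining
 * their pending delta), then reprogrammed and restarted on the new
 * counter, so no counts are lost across a reschedule.
 */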
static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}
void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);

	uncore_perf_event_update(box, event);
}
/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings, therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}
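/*
 * Validation runs against a throw-away "fake" box, so scheduling can
 * be simulated without touching live hardware or event state; only
 * the constraint bookkeeping is exercised.
 */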
static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * The uncore PMU measures at all privilege levels all the time,
	 * so it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else {
		hwc->config = event->attr.config & pmu->type->event_mask;
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}
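/*
 * From userspace, uncore events are always counted system-wide on the
 * package's designated reader cpu; event->cpu is rewritten above.
 * Roughly (PMU and event names vary by platform):
 *
 *	perf stat -a -e uncore_imc/event=0x01/ -- sleep 1
 *
 * Sampling is not supported; any sample_period is rejected above.
 */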
static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};
static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups	= pmu->type->attr_groups,
			.task_ctx_nr	= perf_invalid_context,
			.event_init	= uncore_pmu_event_init,
			.add		= uncore_pmu_event_add,
			.del		= uncore_pmu_event_del,
			.start		= uncore_pmu_event_start,
			.stop		= uncore_pmu_event_stop,
			.read		= uncore_pmu_event_read,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}
static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}
static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, pkg;

	if (pmu) {
		/* pmu->boxes[] is indexed by the logical package id */
		pkg = topology_logical_package_id(cpu);
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box)
				uncore_box_exit(box);
		}
	}
}

static void uncore_exit_boxes(void *dummy)
{
	struct intel_uncore_type **types;

	for (types = uncore_msr_uncores; *types; types++)
		__uncore_exit_boxes(*types, smp_processor_id());
}
static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int pkg;

	for (pkg = 0; pkg < max_packages; pkg++)
		kfree(pmu->boxes[pkg]);
	kfree(pmu->boxes);
}
static void uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}
static void uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}
static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *attr_group;
	struct attribute **attrs;
	size_t size;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = max_packages * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id	= setid ? i : -1;
		pmus[i].pmu_idx	= i;
		pmus[i].type	= type;
		pmus[i].boxes	= kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			return -ENOMEM;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				   0, type->num_counters, 0, 0);

	if (type->event_descs) {
		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
				     sizeof(*attr_group), GFP_KERNEL);
		if (!attr_group)
			return -ENOMEM;

		attrs = (struct attribute **)(attr_group + 1);
		attr_group->name = "events";
		attr_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = attr_group;
	}

	type->pmu_group = &uncore_pmu_attr_group;
	return 0;
}
static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu = NULL;
	struct intel_uncore_box *box;
	int phys_id, pkg, ret;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	pkg = topology_phys_to_logical_pkg(phys_id);
	if (pkg < 0)
		return -EINVAL;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[pkg].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

	/*
	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
	 * for multiple instances of an uncore PMU device type. So the PCI
	 * slot and function are needed as well to identify the uncore box.
	 */
	if (id->driver_data & ~0xffff) {
		struct pci_driver *pci_drv = pdev->driver;
		const struct pci_device_id *ids = pci_drv->id_table;
		unsigned int devfn;

		while (ids && ids->vendor) {
			if ((ids->vendor == pdev->vendor) &&
			    (ids->device == pdev->device)) {
				devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
						  UNCORE_PCI_DEV_FUNC(ids->driver_data));
				if (devfn == pdev->devfn) {
					pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
					break;
				}
			}
			ids++;
		}
		if (pmu == NULL)
			return -ENODEV;
	} else {
		/*
		 * for performance monitoring units with multiple boxes,
		 * each box has a different function id.
		 */
		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	}

	if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	atomic_inc(&box->refcnt);
	box->pci_phys_id = phys_id;
	box->pkgid = pkg;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	pmu->boxes[pkg] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

	/* First active box registers the pmu */
	ret = uncore_pmu_register(pmu);
	if (ret) {
		pci_set_drvdata(pdev, NULL);
		pmu->boxes[pkg] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}
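/*
 * The first successfully probed box of a PMU registers the PMU with
 * perf; later boxes on other packages only bump pmu->activeboxes.
 * uncore_pci_remove() mirrors this and unregisters the PMU when the
 * last active box goes away.
 */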
static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box;
	struct intel_uncore_pmu *pmu;
	int i, phys_id, pkg;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	pkg = topology_phys_to_logical_pkg(phys_id);

	box = pci_get_drvdata(pdev);
	if (!box) {
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
				uncore_extra_pci_dev[pkg].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
		return;

	pci_set_drvdata(pdev, NULL);
	pmu->boxes[pkg] = NULL;
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}
static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	size = max_packages * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores, false);
	if (ret)
		goto errtype;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret)
		goto errtype;

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}
static void uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}
static void uncore_cpu_dying(int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
}
static void uncore_cpu_starting(int cpu, bool init)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg, ncpus = 1;

	if (init) {
		/*
		 * On init we get the number of online cpus in the package
		 * and set refcount for all of them.
		 */
		ncpus = cpumask_weight(topology_core_cpumask(cpu));
	}

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (!box)
				continue;
			/* The first cpu on a package activates the box */
			if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
				uncore_box_init(box);
		}
	}
}
static int uncore_cpu_prepare(int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[pkg])
				continue;
			/* First cpu of a package allocates the box */
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				return -ENOMEM;
			box->pmu = pmu;
			box->pkgid = pkg;
			pmu->boxes[pkg] = box;
		}
	}
	return 0;
}
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[pkg];
		if (!box)
			continue;

		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		box->cpu = -1;
		if (new_cpu < 0)
			continue;

		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
	}
}
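/*
 * When the designated reader cpu of a package goes offline, active
 * events survive: perf_pmu_migrate_context() moves the whole perf
 * context to the new cpu, where the events are re-added and the poll
 * hrtimer is started again.
 */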
static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}
static void uncore_event_exit_cpu(int cpu)
{
	int target;

	/* Check if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return;

	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);
}
static void uncore_event_init_cpu(int cpu)
{
	int target;

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)
		return;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
}
static int uncore_cpu_notifier(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		return notifier_from_errno(uncore_cpu_prepare(cpu));

	case CPU_STARTING:
		uncore_cpu_starting(cpu, false);
		/* fall through */
	case CPU_DOWN_FAILED:
		uncore_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DYING:
		uncore_cpu_dying(cpu);
		break;

	case CPU_DOWN_PREPARE:
		uncore_event_exit_cpu(cpu);
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block uncore_cpu_nb = {
	.notifier_call	= uncore_cpu_notifier,
	/*
	 * to migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
	.priority	= CPU_PRI_PERF + 1,
};
static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}
static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}
static int __init uncore_cpu_init(void)
{
	int ret;

	ret = uncore_types_init(uncore_msr_uncores, true);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;
err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}
static void __init uncore_cpu_setup(void *dummy)
{
	uncore_cpu_starting(smp_processor_id(), true);
}

/* Static bitmap to avoid allocating a few bytes for the normal case */
static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC);
static int __init uncore_cpumask_init(bool msr)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		unsigned int pkg = topology_logical_package_id(cpu);
		int ret;

		if (test_and_set_bit(pkg, packages))
			continue;
		/*
		 * The first online cpu of each package allocates and takes
		 * the refcounts for all other online cpus in that package.
		 * If msrs are not enabled no allocation is required.
		 */
		if (msr) {
			ret = uncore_cpu_prepare(cpu);
			if (ret)
				return ret;
		}
		uncore_event_init_cpu(cpu);
		smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1);
	}
	__register_cpu_notifier(&uncore_cpu_nb);
	return 0;
}
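/*
 * uncore_cpumask_init() runs once at load time: the first online cpu
 * of each package prepares the boxes, is elected event collector for
 * its package and initializes the hardware via a cross call. After
 * that the hotplug notifier takes over for cpu online/offline events.
 */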
#define X86_UNCORE_MODEL_MATCH(model, init)	\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_uncore_init_fun {
	void	(*cpu_init)(void);
	int	(*pci_init)(void);
};
static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
	.cpu_init = nhm_uncore_cpu_init,
};

static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = snb_uncore_pci_init,
};

static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = ivb_uncore_pci_init,
};

static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = hsw_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = bdw_uncore_pci_init,
};

static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
	.cpu_init = snbep_uncore_cpu_init,
	.pci_init = snbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
	.cpu_init = nhmex_uncore_cpu_init,
};

static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
	.cpu_init = ivbep_uncore_cpu_init,
	.pci_init = ivbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
	.cpu_init = hswep_uncore_cpu_init,
	.pci_init = hswep_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
	.cpu_init = bdx_uncore_cpu_init,
	.pci_init = bdx_uncore_pci_init,
};

static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
	.cpu_init = knl_uncore_cpu_init,
	.pci_init = knl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
	.cpu_init = skl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};
static const struct x86_cpu_id intel_uncore_match[] __initconst = {
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP,	  nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM,	  nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE,	  nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP,	  nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,	  snb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,	  ivb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE,	  hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,	  hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E,	  hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,  bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,  bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X,   snbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX,	  nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX,	  nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,	  ivbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X,	  hswep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,	  bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,	  knl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,  skl_uncore_init),
	{},
};

MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_uncore_init_fun *uncore_init;
	int pret = 0, cret = 0, ret;

	id = x86_match_cpu(intel_uncore_match);
	if (!id)
		return -ENODEV;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	max_packages = topology_max_packages();

	uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
	if (uncore_init->pci_init) {
		pret = uncore_init->pci_init();
		if (!pret)
			pret = uncore_pci_init();
	}

	if (uncore_init->cpu_init) {
		uncore_init->cpu_init();
		cret = uncore_cpu_init();
	}

	if (cret && pret)
		return -ENODEV;

	cpu_notifier_register_begin();
	ret = uncore_cpumask_init(!cret);
	if (ret)
		goto err;
	cpu_notifier_register_done();
	return 0;

err:
	/* Undo box->init_box() */
	on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
	cpu_notifier_register_done();
	return ret;
}
module_init(intel_uncore_init);
static void __exit intel_uncore_exit(void)
{
	cpu_notifier_register_begin();
	__unregister_cpu_notifier(&uncore_cpu_nb);
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
	cpu_notifier_register_done();
}
module_exit(intel_uncore_exit);