/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/syscore_ops.h>
#include <linux/sched/clock.h>

#include "../perf_event.h"

static u32 ibs_caps __read_mostly;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>
#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT
/*
 * IBS states:
 *
 * ENABLED; tracks the pmu::add(), pmu::del() state, when set the counter is taken
 * and any further add()s must fail.
 *
 * STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are
 * complicated by the fact that the IBS hardware can send late NMIs (i.e. after
 * we've cleared the EN bit).
 *
 * In order to consume these late NMIs we have the STOPPED state, any NMI that
 * happens after we've cleared the EN state will clear this bit and report the
 * NMI handled (this is fundamentally racy in the face of multiple NMI sources,
 * someone else can consume our BIT and our NMI will go unhandled).
 *
 * And since we cannot set/clear this separate bit together with the EN bit,
 * there are races; if we cleared STARTED early, an NMI could land in
 * between clearing STARTED and clearing the EN bit (in fact multiple NMIs
 * could happen if the period is small enough), and consume our STOPPED bit
 * and trigger streams of unhandled NMIs.
 *
 * If, however, we clear STARTED late, an NMI can hit between clearing the
 * EN bit and clearing STARTED, still see STARTED set and process the event.
 * If this event will have the VALID bit clear, we bail properly, but this
 * is not a given. With VALID set we can end up calling pmu::stop() again
 * (the throttle logic) and trigger the WARNs in there.
 *
 * So what we do is set STOPPING before clearing EN to avoid the pmu::stop()
 * nesting, and clear STARTED late, so that we have a well defined state over
 * the clearing of the EN bit.
 *
 * XXX: we could probably be using !atomic bitops for all this.
 */
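/*
 * A minimal sketch of the resulting ordering (illustration only; the
 * real transitions live in perf_ibs_start(), perf_ibs_stop() and
 * perf_ibs_handle_irq() below):
 *
 *	pmu::stop()				NMI handler
 *	-----------				-----------
 *	set_bit(IBS_STOPPING)			if (!test_bit(IBS_STARTED))
 *	set_bit(IBS_STOPPED)				return test_and_clear_bit(IBS_STOPPED);
 *	wrmsrl()	<- clears the EN bit	else
 *	clear_bit(IBS_STARTED)				process the (possibly !VALID) sample
 */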
enum ibs_states {
	IBS_ENABLED	= 0,
	IBS_STARTED	= 1,
	IBS_STOPPING	= 2,
	IBS_STOPPED	= 3,

	IBS_MAX_STATES,
};

struct cpu_perf_ibs {
	struct perf_event	*event;
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];
};
struct perf_ibs {
	struct pmu			pmu;
	unsigned int			msr;
	u64				config_mask;
	u64				cnt_mask;
	u64				enable_mask;
	u64				valid_mask;
	u64				max_period;
	unsigned long			offset_mask[1];
	int				offset_max;
	struct cpu_perf_ibs __percpu	*pcpu;

	struct attribute		**format_attrs;
	struct attribute_group		format_group;
	const struct attribute_group	*attr_groups[2];

	u64				(*get_count)(u64 config);
};
struct perf_ibs_data {
	u32		size;
	union {
		u32	data[0];	/* data buffer starts here */
		u32	caps;
	};
	u64		regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < (s64)min)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * If the hw period that triggers the sw overflow is too short
	 * we might hit the irq handler. This biases the results.
	 * Thus we shorten the next-to-last period and set the last
	 * period to the max period.
	 */
	if (left > max) {
		left -= max;
		if (left > max)
			left = max;
		else if (left < (s64)min)
			left = min;
	}

	*hw_period = (u64)left;

	return overflow;
}
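/*
 * Worked example (sketch): with sample_period == 0x10000 and
 * period_left == -0x20000 (we are far behind), the first branch above
 * resyncs to one full period and reports an overflow. With
 * period_left == 0x3 (below min == 1 << 4), a full period is added so
 * the hardware is never armed with a count under the 16-event minimum.
 */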
static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	s64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count, hence the shift pair below.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}
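/*
 * Example of the shift pair above (sketch): with width == 32, i.e.
 * shift == 32, a 32-bit counter that moved from 0xfffffff0 to
 * 0x00000010 yields
 *
 *	delta = (0x10 << 32) - (0xfffffff0 << 32);	== 0x20 << 32
 *	delta >>= 32;					== 0x20
 *
 * so the rollover is accounted correctly. IBS always passes
 * width == 64 (shift == 0), see perf_ibs_event_update() below.
 */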
static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
	if (perf_ibs_fetch.pmu.type == type)
		return &perf_ibs_fetch;
	if (perf_ibs_op.pmu.type == type)
		return &perf_ibs_op;
	return NULL;
}
/*
 * Use IBS for precise event sampling:
 *
 *	perf record -a -e cpu-cycles:p ...	# use ibs op counting cycle count
 *	perf record -a -e r076:p ...		# same as -e cpu-cycles:p
 *	perf record -a -e r0C1:p ...		# use ibs op counting micro-ops
 *
 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
 * MSRC001_1033) is used to select either cycle or micro-ops counting
 * mode.
 *
 * The rip of IBS samples has skid 0. Thus, IBS supports precise
 * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
 * rip is invalid when IBS was not able to record the rip correctly.
 * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
 */
static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
{
	switch (event->attr.precise_ip) {
	case 0:
		return -ENOENT;
	case 1:
	case 2:
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		switch (event->attr.config) {
		case PERF_COUNT_HW_CPU_CYCLES:
			*config = 0;
			return 0;
		}
		break;
	case PERF_TYPE_RAW:
		switch (event->attr.config) {
		case 0x0076:
			*config = 0;
			return 0;
		case 0x00C1:
			*config = IBS_OP_CNT_CTL;
			return 0;
		}
		break;
	default:
		return -ENOENT;
	}

	return -EOPNOTSUPP;
}
static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;
	int ret;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (perf_ibs) {
		config = event->attr.config;
	} else {
		perf_ibs = &perf_ibs_op;
		ret = perf_ibs_precise_event(event, &config);
		if (ret)
			return ret;
	}

	if (event->pmu != &perf_ibs->pmu)
		return -ENOENT;

	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * lower 4 bits cannot be set in ibs max cnt,
			 * but allowing it in case we adjust the
			 * sample period to set a frequency.
			 */
			return -EINVAL;
		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
	} else {
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}

	if (!hwc->sample_period)
		return -EINVAL;

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}
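/*
 * Userspace view of the checks above (sketch; the PMU type value must
 * be read from sysfs at runtime and the attr shown is illustrative):
 *
 *	struct perf_event_attr attr = {
 *		.size		= sizeof(attr),
 *		.type		= <ibs_op type>,	from /sys/bus/event_source/devices/ibs_op/type
 *		.config		= 0,
 *		.sample_period	= 0x10000,		must be a multiple of 16 unless freq mode is used
 *		.sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_RAW,
 *	};
 *	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 */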
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int overflow;

	/* ignore lower 4 bits in min count: */
	overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return overflow;
}
static u64 get_ibs_fetch_count(u64 config)
{
	return (config & IBS_FETCH_CNT) >> 12;
}

static u64 get_ibs_op_count(u64 config)
{
	u64 count = 0;

	if (config & IBS_OP_VAL)
		count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */

	if (ibs_caps & IBS_CAPS_RDWROPCNT)
		count += (config & IBS_OP_CUR_CNT) >> 32;

	return count;
}
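/*
 * Example (sketch): if IbsOpCtl reads back with IBS_OP_VAL set, a
 * MaxCnt field of 0x1000 and, on IBS_CAPS_RDWROPCNT parts, a CurCnt
 * field of 0x7, the accumulated op count is
 *
 *	(0x1000 << 4) + 0x7 == 0x10007
 *
 * i.e. the programmed period plus the ops executed past the rollover.
 */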
static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 *config)
{
	u64 count = perf_ibs->get_count(*config);

	/*
	 * Set width to 64 since we do not overflow on max width but
	 * instead on max count. In perf_ibs_set_period() we clear
	 * prev count manually on overflow.
	 */
	while (!perf_event_try_update(event, count, 64)) {
		rdmsrl(event->hw.config_base, *config);
		count = perf_ibs->get_count(*config);
	}
}
static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
					 struct hw_perf_event *hwc, u64 config)
{
	wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
}
/*
 * Erratum #420 Instruction-Based Sampling Engine May Generate
 * Interrupt that Cannot Be Cleared:
 *
 * Must clear counter mask first, then clear the enable bit. See
 * Revision Guide for AMD Family 10h Processors, Publication #41322.
 */
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
					  struct hw_perf_event *hwc, u64 config)
{
	config &= ~perf_ibs->cnt_mask;
	wrmsrl(hwc->config_base, config);
	config &= ~perf_ibs->enable_mask;
	wrmsrl(hwc->config_base, config);
}
/*
 * We cannot restore the ibs pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, ignoring PERF_EF_RELOAD and PERF_EF_UPDATE flags in
 * perf_ibs_start()/perf_ibs_stop() and instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 period;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	perf_ibs_set_period(perf_ibs, hwc, &period);
	/*
	 * Set STARTED before enabling the hardware, such that a subsequent NMI
	 * must observe it.
	 */
	set_bit(IBS_STARTED,    pcpu->state);
	clear_bit(IBS_STOPPING, pcpu->state);
	perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);
}
static void perf_ibs_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 config;
	int stopping;

	if (test_and_set_bit(IBS_STOPPING, pcpu->state))
		return;

	stopping = test_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
		return;

	rdmsrl(hwc->config_base, config);

	if (stopping) {
		/*
		 * Set STOPPED before disabling the hardware, such that it
		 * must be visible to NMIs the moment we clear the EN bit,
		 * at which point we can generate an !VALID sample which
		 * we need to consume.
		 */
		set_bit(IBS_STOPPED, pcpu->state);
		perf_ibs_disable_event(perf_ibs, hwc, config);
		/*
		 * Clear STARTED after disabling the hardware; if it were
		 * cleared first, an NMI hitting after the clear but before
		 * the EN bit is cleared might be treated as spurious and
		 * go unhandled.
		 *
		 * Clearing it after, however, creates the problem of the NMI
		 * handler seeing STARTED but not having a valid sample.
		 */
		clear_bit(IBS_STARTED, pcpu->state);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/*
	 * Clear valid bit to not count rollovers on update, rollovers
	 * are only updated in the irq handler.
	 */
	config &= ~perf_ibs->valid_mask;

	perf_ibs_event_update(perf_ibs, event, &config);
	hwc->state |= PERF_HES_UPTODATE;
}
static int perf_ibs_add(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_ENABLED, pcpu->state))
		return -ENOSPC;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	pcpu->event = event;

	if (flags & PERF_EF_START)
		perf_ibs_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_ibs_del(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
		return;

	perf_ibs_stop(event, PERF_EF_UPDATE);

	pcpu->event = NULL;

	perf_event_update_userpage(event);
}
static void perf_ibs_read(struct perf_event *event) { }

PMU_FORMAT_ATTR(rand_en,	"config:57");
PMU_FORMAT_ATTR(cnt_ctl,	"config:19");

static struct attribute *ibs_fetch_format_attrs[] = {
	&format_attr_rand_en.attr,
	NULL,
};

static struct attribute *ibs_op_format_attrs[] = {
	NULL,	/* &format_attr_cnt_ctl.attr if IBS_CAPS_OPCNT */
	NULL,
};
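/*
 * When IBS_CAPS_OPCNT is set, perf_event_ibs_init() below plugs
 * format_attr_cnt_ctl into the slot above, so micro-op counting can be
 * selected from userspace, e.g. (sketch):
 *
 *	perf record -a -e ibs_op/cnt_ctl=1/ ...
 *
 * cnt_ctl=1 sets config bit 19 (IbsOpCntCtl), see the precise-event
 * comment further up.
 */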
static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	},
	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,
	.format_attrs		= ibs_fetch_format_attrs,

	.get_count		= get_ibs_fetch_count,
};
static struct perf_ibs perf_ibs_op = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSOPCTL,
	.config_mask		= IBS_OP_CONFIG_MASK,
	.cnt_mask		= IBS_OP_MAX_CNT,
	.enable_mask		= IBS_OP_ENABLE,
	.valid_mask		= IBS_OP_VAL,
	.max_period		= IBS_OP_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,
	.format_attrs		= ibs_op_format_attrs,

	.get_count		= get_ibs_op_count,
};
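/*
 * Note on the period limits above (sketch): both PMUs program their
 * period through a MaxCnt field counting in units of 16, hence the
 * "<< 4" - with a 16-bit IBS_OP_MAX_CNT mask this caps max_period for
 * ibs_op at 0xffff << 4 == 0xffff0 sampled ops.
 */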
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, check_rip, offset_max, throttle = 0;
	unsigned int msr;
	u64 *buf, *config, period;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
		/*
		 * Catch spurious interrupts after stopping IBS: After
		 * disabling IBS there could be still incoming NMIs
		 * with samples that even have the valid bit cleared.
		 * Mark all these NMIs as handled.
		 */
		if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
			return 1;

		return 0;
	}

	if (WARN_ON_ONCE(!event))
		goto fail;

	hwc = &event->hw;
	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	if (!(*buf++ & perf_ibs->valid_mask))
		goto fail;

	config = &ibs_data.regs[0];
	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);
	if (!perf_ibs_set_period(perf_ibs, hwc, &period))
		goto out;	/* no sw counter overflow */

	ibs_data.caps = ibs_caps;
	size = 1;
	offset = 1;
	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
	if (event->attr.sample_type & PERF_SAMPLE_RAW)
		offset_max = perf_ibs->offset_max;
	else if (check_rip)
		offset_max = 3;
	else
		offset_max = 1;
	do {
		rdmsrl(msr + offset, *buf++);
		size++;
		offset = find_next_bit(perf_ibs->offset_mask,
				       perf_ibs->offset_max,
				       offset + 1);
	} while (offset < offset_max);
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		/*
		 * Read IbsBrTarget and IbsOpData4 separately
		 * depending on their availability.
		 * Can't add to offset_max as they are staggered.
		 */
		if (ibs_caps & IBS_CAPS_BRNTRGT) {
			rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
			size++;
		}
		if (ibs_caps & IBS_CAPS_OPDATA4) {
			rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
			size++;
		}
	}
	ibs_data.size = sizeof(u64) * size;

	regs = *iregs;
	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
		regs.flags &= ~PERF_EFLAGS_EXACT;
	} else {
		set_linear_ip(&regs, ibs_data.regs[1]);
		regs.flags |= PERF_EFLAGS_EXACT;
	}

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw = (struct perf_raw_record){
			.frag = {
				.size = sizeof(u32) + ibs_data.size,
				.data = ibs_data.data,
			},
		};
		data.raw = &raw;
	}

	throttle = perf_event_overflow(event, &data, &regs);
out:
	if (throttle)
		perf_ibs_stop(event, 0);
	else
		perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);

	return 1;
}
static int
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	u64 stamp = sched_clock();
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	perf_sample_event_took(sched_clock() - stamp);

	return handled;
}
NOKPROBE_SYMBOL(perf_ibs_nmi_handler);
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	/* register attributes */
	if (perf_ibs->format_attrs[0]) {
		memset(&perf_ibs->format_group, 0, sizeof(perf_ibs->format_group));
		perf_ibs->format_group.name	= "format";
		perf_ibs->format_group.attrs	= perf_ibs->format_attrs;

		memset(&perf_ibs->attr_groups, 0, sizeof(perf_ibs->attr_groups));
		perf_ibs->attr_groups[0]	= &perf_ibs->format_group;
		perf_ibs->pmu.attr_groups	= perf_ibs->attr_groups;
	}

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}
static __init void perf_event_ibs_init(void)
{
	struct attribute **attr = ibs_op_format_attrs;

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");

	if (ibs_caps & IBS_CAPS_OPCNT) {
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
		*attr++ = &format_attr_cnt_ctl.attr;
	}

	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");

	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
}
#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init void perf_event_ibs_init(void) { }

#endif
/* IBS - apic initialization, for perf and oprofile */

static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}
u32 get_ibs_caps(void)
{
	return ibs_caps;
}
EXPORT_SYMBOL(get_ibs_caps);
static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}
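/*
 * setup_APIC_eilvt() returns 0 on success, so the '!' in the helpers
 * above turns them into "reservation succeeded" predicates. The usage
 * pattern below is (sketch):
 *
 *	if (get_eilvt(offset))
 *		break;			(offset is now reserved for us)
 *	...
 *	put_eilvt(offset);		(release again on the error path)
 */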
/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();

	return valid;
}
static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			pci_dev_put(cpu_cfg);
			pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
				 value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		pr_debug("No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset. This then updates
 * the offset in the per-node IBS_CTL MSR. The per-core APIC setup of
 * the IBS interrupt vector is handled by the cpu hotplug callbacks,
 * which use the new offset.
 */
static void force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		pr_debug("No EILVT entry available\n");
		return;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid())
		goto out;

	pr_info("LVT offset %d assigned\n", offset);

	return;
out:
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();
}
static void ibs_eilvt_setup(void)
{
	/*
	 * Force LVT offset assignment for family 10h: The offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, fall
	 * back to the BIOS settings and try to set it up from there.
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();
}
static inline int get_ibs_lvt_offset(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return val & IBSCTL_LVT_OFFSET_MASK;
}

static void setup_APIC_ibs(void)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset < 0)
		goto failed;

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;

failed:
	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}

static void clear_APIC_ibs(void)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset >= 0)
		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
{
	setup_APIC_ibs();
	return 0;
}
#ifdef CONFIG_PM

static int perf_ibs_suspend(void)
{
	clear_APIC_ibs();
	return 0;
}

static void perf_ibs_resume(void)
{
	ibs_eilvt_setup();
	setup_APIC_ibs();
}

static struct syscore_ops perf_ibs_syscore_ops = {
	.resume		= perf_ibs_resume,
	.suspend	= perf_ibs_suspend,
};

static void perf_ibs_pm_init(void)
{
	register_syscore_ops(&perf_ibs_syscore_ops);
}

#else

static inline void perf_ibs_pm_init(void) { }

#endif
static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
{
	clear_APIC_ibs();
	return 0;
}
static __init int amd_ibs_init(void)
{
	u32 caps;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	ibs_eilvt_setup();

	if (!ibs_eilvt_valid())
		return -EINVAL;

	perf_ibs_pm_init();

	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	/*
	 * x86_pmu_amd_ibs_starting_cpu will be called from core on
	 * all online cpus.
	 */
	cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
			  "perf/x86/amd/ibs:starting",
			  x86_pmu_amd_ibs_starting_cpu,
			  x86_pmu_amd_ibs_dying_cpu);

	perf_event_ibs_init();

	return 0;
}
/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);