void (*unreserve)(void);
int (*setup)(unsigned nmi_hz);
void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
- void (*stop)(void *);
+ void (*stop)(void);
unsigned perfctr;
unsigned evntsel;
u64 checkbit;
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
- return wd_ops ? msr - wd_ops->perfctr : 0;
+ /* returns the bit offset of the performance counter register */
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_AMD:
+ return (msr - MSR_K7_PERFCTR0);
+ case X86_VENDOR_INTEL:
+ if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+ return (msr - MSR_ARCH_PERFMON_PERFCTR0);
+
+ switch (boot_cpu_data.x86) {
+ case 6:
+ return (msr - MSR_P6_PERFCTR0);
+ case 15:
+ return (msr - MSR_P4_BPU_PERFCTR0);
+ }
+ }
+ return 0;
}
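For reference, the offset computed here indexes a per-register reservation bitmap; a minimal sketch of the consumer, loosely based on reserve_perfctr_nmi() in the same file (the bitmap name and the NMI_MAX_COUNTER_BITS bound come from the surrounding file, not from this hunk):

/* sketch: the bit offset indexes the perfctr reservation bitmap */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter = nmi_perfctr_msr_to_bit(msr);

	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	/* 1 on success, 0 if another user (e.g. oprofile) owns the bit */
	if (!test_and_set_bit(counter, perfctr_nmi_owner))
		return 1;
	return 0;
}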
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
- return wd_ops ? msr - wd_ops->evntsel : 0;
+ /* returns the bit offset of the event selection register */
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_AMD:
+ return (msr - MSR_K7_EVNTSEL0);
+ case X86_VENDOR_INTEL:
+ if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+ return (msr - MSR_ARCH_PERFMON_EVENTSEL0);
+
+ switch (boot_cpu_data.x86) {
+ case 6:
+ return (msr - MSR_P6_EVNTSEL0);
+ case 15:
+ return (msr - MSR_P4_BSU_ESCR0);
+ }
+ }
+ return 0;
}
/* checks for a bit availability (hack for oprofile) */
if (atomic_read(&nmi_active) <= 0)
return;
- on_each_cpu(wd_ops->stop, NULL, 0, 1);
+ on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
wd_ops->unreserve();
BUG_ON(atomic_read(&nmi_active) != 0);
return 1;
}
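Because stop() no longer takes a void * argument, it cannot be handed to on_each_cpu() directly; the generic per-CPU wrapper stop_apic_nmi_watchdog() from nmi.c is passed instead. A rough sketch of that wrapper, assuming the wd_enabled per-CPU flag and nmi_active counter from nmi.c:

/* rough sketch of the wrapper passed to on_each_cpu() above */
void stop_apic_nmi_watchdog(void *unused)
{
	/* only the LOCAL and IO APIC watchdogs are supported */
	if (nmi_watchdog != NMI_LOCAL_APIC && nmi_watchdog != NMI_IO_APIC)
		return;
	if (nmi_watchdog == NMI_LOCAL_APIC)
		lapic_watchdog_stop();		/* ends up in wd_ops->stop() */
	__get_cpu_var(wd_enabled) = 0;
	atomic_dec(&nmi_active);
}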
-static void single_msr_stop_watchdog(void *arg)
+static void single_msr_stop_watchdog(void)
{
struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
static void single_msr_unreserve(void)
{
- release_evntsel_nmi(wd_ops->perfctr);
- release_perfctr_nmi(wd_ops->evntsel);
+ release_evntsel_nmi(wd_ops->evntsel);
+ release_perfctr_nmi(wd_ops->perfctr);
}
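The fix pairs each MSR with its matching release helper: the event-select MSR now goes to release_evntsel_nmi() and the counter MSR to release_perfctr_nmi(); before, the two arguments were crossed, so the wrong reservation bits were cleared. For symmetry, the reserve side presumably looks like this (sketch, not part of this hunk):

/* sketch of the matching reserve path */
static int single_msr_reserve(void)
{
	return reserve_perfctr_nmi(wd_ops->perfctr) &&
		reserve_evntsel_nmi(wd_ops->evntsel);
}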
static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
.stop = single_msr_stop_watchdog,
.perfctr = MSR_K7_PERFCTR0,
.evntsel = MSR_K7_EVNTSEL0,
- .checkbit = 1ULL<<63,
+ .checkbit = 1ULL<<47,
};
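The K7/K8 performance counters are 48 bits wide, so the "counter still running" test has to look at bit 47; bit 63 is outside the implemented counter width on these CPUs. The checkbit is consumed on every watchdog NMI, roughly as follows (sketch based on lapic_wd_event() in the same file):

/* sketch: how checkbit is used on each watchdog NMI */
int lapic_wd_event(unsigned nmi_hz)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 ctr;

	rdmsrl(wd->perfctr_msr, ctr);
	if (ctr & wd_ops->checkbit) {
		/* perfctr still counting towards overflow, not our NMI */
		return 0;
	}
	wd_ops->rearm(wd, nmi_hz);
	return 1;
}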
/* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */
perfctr_msr = MSR_P6_PERFCTR0;
evntsel_msr = MSR_P6_EVNTSEL0;
- wrmsrl(perfctr_msr, 0UL);
+ /* KVM doesn't implement this MSR */
+ if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
+ return 0;
evntsel = P6_EVNTSEL_INT
| P6_EVNTSEL_OS
return 1;
}
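wrmsr_safe() is the exception-table-protected variant of wrmsr(): a write to a non-existent MSR returns a negative error instead of raising an unhandled #GP, so setup can bail out cleanly on hypervisors that do not emulate the P6 counter MSRs. A hypothetical helper showing the same probe, purely for illustration:

/* hypothetical: true if the MSR accepts a write (clears it as a side effect) */
static int watchdog_msr_present(unsigned int msr)
{
	return wrmsr_safe(msr, 0, 0) >= 0;
}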
-static void stop_p4_watchdog(void *arg)
+static void stop_p4_watchdog(void)
{
struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
wrmsr(wd->cccr_msr, 0, 0);
{
#ifdef CONFIG_SMP
if (smp_num_siblings > 1)
- release_evntsel_nmi(MSR_P4_IQ_PERFCTR1);
+ release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
#endif
- release_evntsel_nmi(MSR_P4_IQ_PERFCTR0);
- release_perfctr_nmi(MSR_P4_CRU_ESCR0);
+ release_evntsel_nmi(MSR_P4_CRU_ESCR0);
+ release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
}
static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
.setup = setup_intel_arch_watchdog,
.rearm = p6_rearm,
.stop = single_msr_stop_watchdog,
- .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
- .evntsel = MSR_ARCH_PERFMON_EVENTSEL0,
+ .perfctr = MSR_ARCH_PERFMON_PERFCTR1,
+ .evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
};
static void probe_nmi_watchdog(void)
probe_nmi_watchdog();
if (!wd_ops)
return -1;
+
+ if (!wd_ops->reserve()) {
+ printk(KERN_ERR
+ "NMI watchdog: cannot reserve perfctrs\n");
+ return -1;
+ }
}
if (!(wd_ops->setup(nmi_hz))) {
void lapic_watchdog_stop(void)
{
if (wd_ops)
- wd_ops->stop(NULL);
+ wd_ops->stop();
}
unsigned lapic_adjust_nmi_hz(unsigned hz)