#endif /* CONFIG_HARDLOCKUP_DETECTOR */
-#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
+#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)
static DEFINE_PER_CPU(atomic_t, hrtimer_interrupts);
static DEFINE_PER_CPU(int, hrtimer_interrupts_saved);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
+/*
+ * Mark @cpu's hardlockup detector as "touched" so the next
+ * watchdog_hardlockup_check() on that CPU clears the flag and returns
+ * early instead of checking for a lockup.  May be called from a CPU
+ * other than @cpu.  The smp_wmb() orders the flag store against later
+ * stores; it pairs with the smp_rmb() in watchdog_hardlockup_check().
+ */
+void watchdog_hardlockup_touch_cpu(unsigned int cpu)
+{
+ per_cpu(watchdog_hardlockup_touched, cpu) = true;
+
+ /* Match with smp_rmb() in watchdog_hardlockup_check() */
+ smp_wmb();
+}
+
static bool is_hardlockup(unsigned int cpu)
{
int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));
return false;
}
-static void watchdog_hardlockup_kick(void)
+/*
+ * Count one hrtimer tick on the current CPU and return the incremented
+ * count, so the caller can hand it to the buddy hardlockup check.
+ */
+static unsigned long watchdog_hardlockup_kick(void)
{
- atomic_inc(raw_cpu_ptr(&hrtimer_interrupts));
+ return atomic_inc_return(raw_cpu_ptr(&hrtimer_interrupts));
}
void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
{
+ /* Match with smp_wmb() in watchdog_hardlockup_touch_cpu() */
+ smp_rmb();
+
if (per_cpu(watchdog_hardlockup_touched, cpu)) {
per_cpu(watchdog_hardlockup_touched, cpu) = false;
return;
}
}
-#else /* CONFIG_HARDLOCKUP_DETECTOR_PERF */
+#else /* CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */
-static inline void watchdog_hardlockup_kick(void) { }
+static inline unsigned long watchdog_hardlockup_kick(void) { return 0; }
-#endif /* !CONFIG_HARDLOCKUP_DETECTOR_PERF */
+#endif /* !CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */
/*
* These functions can be overridden based on the configured hardlockdup detector.
struct pt_regs *regs = get_irq_regs();
int duration;
int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
+ unsigned long hrtimer_interrupts;
if (!watchdog_enabled)
return HRTIMER_NORESTART;
- watchdog_hardlockup_kick();
+ hrtimer_interrupts = watchdog_hardlockup_kick();
+
+ /* test for hardlockups */
+ watchdog_buddy_check_hardlockup(hrtimer_interrupts);
/* kick the softlockup detector */
if (completion_done(this_cpu_ptr(&softlockup_completion))) {