/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */
#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>
int watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#endif
static int __read_mostly watchdog_running;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
static unsigned long soft_lockup_nmi_warn;
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
                        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                watchdog_user_enabled = 0;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif
unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);

        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
static int __init nowatchdog_setup(char *str)
{
        watchdog_user_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);
/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
        watchdog_user_enabled = 0;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
        sysctl_softlockup_all_cpu_backtrace =
                !!simple_strtol(str, NULL, 0);
        return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif
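
/*
 * Summary (descriptive note, not part of the original source): the boot
 * parameters handled above are "nmi_watchdog=panic|nopanic|0",
 * "softlockup_panic=<n>", "nowatchdog", "nosoftlockup" and
 * "softlockup_all_cpu_backtrace=<0|1>".
 */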
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30 ns == 1.074 s.
 */
static unsigned long get_timestamp(void)
{
        return local_clock() >> 30LL; /* 2^30 ~= 10^9 */
}
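
/*
 * Worked example (illustrative note, not part of the original source):
 * local_clock() returns nanoseconds and 2^30 ns ~= 1.074 s, so the shift
 * is a cheap stand-in for dividing by 10^9. A raw clock of 60 * 10^9 ns
 * yields 60e9 >> 30 = 55 "seconds"; the ~7% undercount is harmless
 * because the result is only compared against multi-second thresholds
 * measured on the same scale.
 */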
static void set_sample_period(void)
{
        /*
         * convert watchdog_thresh from seconds to ns
         * the divide by 5 is to give hrtimer several chances (two
         * or three with the current relation between the soft
         * and hard thresholds) to increment before the
         * hardlockup detector generates a warning
         */
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
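
/*
 * Worked example (illustrative note, not part of the original source):
 * with the default watchdog_thresh of 10 s, get_softlockup_thresh()
 * returns 20, so sample_period = 20 * (10^9 / 5) ns = 4 * 10^9 ns = 4 s.
 * That gives the hrtimer five ticks inside the 20 s soft-lockup window
 * and two or three inside the 10 s hard-lockup window, matching the
 * comment above.
 */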
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, get_timestamp());
}
void touch_softlockup_watchdog(void)
{
        /*
         * Preemption can be enabled.  It doesn't matter which CPU's timestamp
         * gets zeroed here, so use the raw_ operation.
         */
        raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * this is done lockless
         * do we care if a 0 races with a timestamp?
         * all it means is the softlockup check starts one cycle later
         */
        for_each_online_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
        /*
         * Using __raw here because some code paths have
         * preemption enabled.  If preemption is enabled
         * then interrupts should be enabled too, in which
         * case we shouldn't have to worry about the watchdog
         * going off.
         */
        __raw_get_cpu_var(watchdog_nmi_touch) = true;
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
#endif
void touch_softlockup_watchdog_sync(void)
{
        __raw_get_cpu_var(softlockup_touch_sync) = true;
        __raw_get_cpu_var(watchdog_touch_ts) = 0;
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return 1;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return 0;
}
#endif
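
/*
 * Illustrative timeline (descriptive note, not part of the original
 * source): with default settings the perf NMI fires roughly every
 * watchdog_thresh (10 s) while the hrtimer fires every sample_period
 * (4 s), so hrtimer_interrupts normally advances by 2-3 between NMIs.
 * Two consecutive NMIs observing the same count mean timer interrupts
 * are no longer being serviced, i.e. the CPU is hard-locked.
 */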
static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp();

        /* Warn about unreasonable delays: */
        if (time_after(now, touch_ts + get_softlockup_thresh()))
                return now - touch_ts;

        return 0;
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};
/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
                 struct perf_sample_data *data,
                 struct pt_regs *regs)
{
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;

        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }

        /* check for a hardlockup
         * This is done by making sure our timer interrupt
         * is incrementing.  The timer interrupt should have
         * fired multiple times before we overflow'd.  If it hasn't
         * then this is a good indication the cpu is stuck
         */
        if (is_hardlockup()) {
                int this_cpu = smp_processor_id();

                /* only print hardlockups once */
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;

                if (hardlockup_panic)
                        panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
                else
                        WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

                __this_cpu_write(hard_watchdog_warn, true);
                return;
        }

        __this_cpu_write(hard_watchdog_warn, false);
        return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }

                /* Clear the guest paused flag on watchdog reset */
                kvm_check_and_clear_guest_paused();
                __touch_watchdog();
                return HRTIMER_RESTART;
        }
        /* check for a softlockup
         * This is done by making sure a high priority task is
         * being scheduled.  The task touches the watchdog to
         * indicate it is getting cpu time.  If it hasn't then
         * this is a good indication some task is hogging the cpu
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /*
                 * If a virtual machine is stopped by the host it can look to
                 * the watchdog like a soft lockup, check to see if the host
                 * stopped the vm before we issue the warning
                 */
                if (kvm_check_and_clear_guest_paused())
                        return HRTIMER_RESTART;

                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;

                if (softlockup_all_cpu_backtrace) {
                        /* Prevent multiple soft-lockup reports if one cpu is already
                         * engaged in dumping cpu back traces
                         */
                        if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
                                /* Someone else will report us. Let's give up */
                                __this_cpu_write(soft_watchdog_warn, true);
                                return HRTIMER_RESTART;
                        }
                }

                printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_all_cpu_backtrace) {
                        /* Avoid generating two back traces for current
                         * given that one is already made above
                         */
                        trigger_allbutself_cpu_backtrace();

                        clear_bit(0, &soft_lockup_nmi_warn);
                        /* Barrier to sync with other cpus */
                        smp_mb__after_atomic();
                }

                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
        struct sched_param param = { .sched_priority = prio };

        sched_setscheduler(current, policy, &param);
}
static void watchdog_enable(unsigned int cpu)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        /* kick off the timer for the hardlockup detector */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

        /* Enable the perf event */
        watchdog_nmi_enable(cpu);

        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED);

        /* initialize timestamp */
        watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
        __touch_watchdog();
}
static void watchdog_disable(unsigned int cpu)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        watchdog_set_prio(SCHED_NORMAL, 0);
        hrtimer_cancel(hrtimer);
        /* disable the perf event */
        watchdog_nmi_disable(cpu);
}
static void watchdog_cleanup(unsigned int cpu, bool online)
{
        watchdog_disable(cpu);
}
static int watchdog_should_run(unsigned int cpu)
{
        return __this_cpu_read(hrtimer_interrupts) !=
                __this_cpu_read(soft_lockup_hrtimer_cnt);
}
/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample period (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
        __this_cpu_write(soft_lockup_hrtimer_cnt,
                         __this_cpu_read(hrtimer_interrupts));
        __touch_watchdog();
}
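
/*
 * Descriptive note (not part of the original source): watchdog_timer_fn()
 * increments hrtimer_interrupts and wakes this thread, while
 * watchdog_should_run() only lets it run when that count differs from
 * soft_lockup_hrtimer_cnt. Copying the count above therefore arms the
 * thread for exactly one run per timer tick, and the timestamp touch
 * proves this CPU can still schedule its SCHED_FIFO watchdog task.
 */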
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;
static int watchdog_nmi_enable(unsigned int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

        /* Try to register using hardware perf events */
        event = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
                                                 watchdog_overflow_callback, NULL);

        /* save cpu0 error for future comparison */
        if (cpu == 0 && IS_ERR(event))
                cpu0_err = PTR_ERR(event);

        if (!IS_ERR(event)) {
                /* only print for cpu0 or different than cpu0 */
                if (cpu == 0 || cpu0_err)
                        pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
                goto out_save;
        }

        /* skip displaying the same error again */
        if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
                return PTR_ERR(event);

        /* vary the KERN level based on the returned errno */
        if (PTR_ERR(event) == -EOPNOTSUPP)
                pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
        else if (PTR_ERR(event) == -ENOENT)
                pr_warning("disabled (cpu%i): hardware events not enabled\n",
                           cpu);
        else
                pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
                       cpu, PTR_ERR(event));
        return PTR_ERR(event);

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}
static void watchdog_nmi_disable(unsigned int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);
        }
        if (cpu == 0) {
                /* watchdog_nmi_enable() expects this to be zero initially. */
                cpu0_err = 0;
        }
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
static struct smp_hotplug_thread watchdog_threads = {
        .store                  = &softlockup_watchdog,
        .thread_should_run      = watchdog_should_run,
        .thread_fn              = watchdog,
        .thread_comm            = "watchdog/%u",
        .setup                  = watchdog_enable,
        .cleanup                = watchdog_cleanup,
        .park                   = watchdog_disable,
        .unpark                 = watchdog_enable,
};
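
/*
 * Descriptive note (not part of the original source): smpboot spawns one
 * "watchdog/%u" thread per CPU from this descriptor. setup/unpark run as
 * a CPU comes online or resumes, park/cleanup as it goes away, so the
 * per-cpu hrtimer and perf event follow CPU hotplug automatically.
 */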
static void restart_watchdog_hrtimer(void *info)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
        int ret;

        /*
         * No need to cancel and restart hrtimer if it is currently executing
         * because it will reprogram itself with the new period now.
         * We should never see it unqueued here because we are running per-cpu
         * with interrupts disabled.
         */
        ret = hrtimer_try_to_cancel(hrtimer);
        if (ret == 1)
                hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                              HRTIMER_MODE_REL_PINNED);
}
static void update_timers(int cpu)
{
        /*
         * Make sure the perf event counter will adapt to the new
         * sampling period. Updating the sampling period directly would
         * be much nicer but we do not have an API for that now, so
         * let's use a big hammer.
         * The hrtimer will pick up the new period on its next tick, but
         * that might already be too late, so restart the timer as well.
         */
        watchdog_nmi_disable(cpu);
        smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
        watchdog_nmi_enable(cpu);
}
static void update_timers_all_cpus(void)
{
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                update_timers(cpu);
        put_online_cpus();
}
static int watchdog_enable_all_cpus(bool sample_period_changed)
{
        int err = 0;

        if (!watchdog_running) {
                err = smpboot_register_percpu_thread(&watchdog_threads);
                if (err)
                        pr_err("Failed to create watchdog threads, disabled\n");
                else
                        watchdog_running = 1;
        } else if (sample_period_changed) {
                update_timers_all_cpus();
        }

        return err;
}
/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_disable_all_cpus(void)
{
        if (watchdog_running) {
                watchdog_running = 0;
                smpboot_unregister_percpu_thread(&watchdog_threads);
        }
}
/*
 * proc handler for /proc/sys/kernel/nmi_watchdog and watchdog_thresh
 */
int proc_dowatchdog(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old_thresh, old_enabled;
        static DEFINE_MUTEX(watchdog_proc_mutex);

        mutex_lock(&watchdog_proc_mutex);
        old_thresh = ACCESS_ONCE(watchdog_thresh);
        old_enabled = ACCESS_ONCE(watchdog_user_enabled);

        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (err || !write)
                goto out;

        set_sample_period();
        /*
         * Enabling and disabling are idempotent: the 'watchdog_running'
         * check in watchdog_*_all_cpus() ensures an already-running
         * watchdog is not started twice and a stopped one is not
         * stopped again.
         */
        if (watchdog_user_enabled && watchdog_thresh)
                err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
        else
                watchdog_disable_all_cpus();

        /* Restore old values on failure */
        if (err) {
                watchdog_thresh = old_thresh;
                watchdog_user_enabled = old_enabled;
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        return err;
}
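
/*
 * Usage sketch (illustrative, not part of the original source; sysctl
 * names per the comment above):
 *
 *      echo 30 > /proc/sys/kernel/watchdog_thresh   # 30 s hard, 60 s soft
 *      echo 0  > /proc/sys/kernel/nmi_watchdog      # stop the watchdog
 *
 * Both writes funnel through proc_dowatchdog() under watchdog_proc_mutex.
 */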
#endif /* CONFIG_SYSCTL */
void __init lockup_detector_init(void)
{
        set_sample_period();

        if (watchdog_user_enabled)
                watchdog_enable_all_cpus(false);
}