kernel/watchdog.c

/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_running;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
                        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                watchdog_user_enabled = 0;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);

        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
        watchdog_user_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
        watchdog_user_enabled = 0;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
        return local_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
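
/*
 * Worked example of the shift above (editor's sketch): local_clock()
 * returning 10,000,000,000 ns (10 s) yields 10000000000 >> 30 = 9, so
 * these "seconds" run about 7% slow (2^30 ns = 1.074 s).  That is
 * plenty accurate for thresholds measured in tens of seconds.
 */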

static void set_sample_period(void)
{
        /*
         * Convert watchdog_thresh from seconds to ns.  The divide by 5
         * gives the hrtimer several chances (two or three with the
         * current relation between the soft and hard thresholds) to
         * increment before the hardlockup detector generates a warning.
         */
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
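
/*
 * Worked numbers for the defaults: watchdog_thresh = 10 makes
 * get_softlockup_thresh() return 20, so sample_period =
 * 20 * (10^9 / 5) = 4 * 10^9 ns, i.e. the hrtimer fires every 4
 * seconds - the "4 seconds by default" mentioned at the watchdog
 * thread function below.
 */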

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
        /*
         * Preemption can be enabled.  It doesn't matter which CPU's timestamp
         * gets zeroed here, so use the raw_ operation.
         */
        raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * This is done locklessly.  Do we care if a 0 races with a
         * timestamp?  All it means is that the softlockup check starts
         * one cycle later.
         */
        for_each_online_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
        /*
         * Using __raw here because some code paths have
         * preemption enabled.  If preemption is enabled
         * then interrupts should be enabled too, in which
         * case we shouldn't have to worry about the watchdog
         * going off.
         */
        __raw_get_cpu_var(watchdog_nmi_touch) = true;
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
        __raw_get_cpu_var(softlockup_touch_sync) = true;
        __raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return 1;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return 0;
}
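
/*
 * Timing sketch (assuming hw_nmi_get_sample_period() programs the NMI
 * to fire roughly once per watchdog_thresh seconds): with the defaults
 * the hrtimer ticks every 4s and the NMI fires every ~10s, so
 * hrtimer_interrupts normally advances two or three times between
 * consecutive NMIs.  If it has not moved at all, interrupts were off
 * on this CPU for the entire window - a hard lockup.
 */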
#endif

static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp();

        /* Warn about unreasonable delays: */
        if (time_after(now, touch_ts + get_softlockup_thresh()))
                return now - touch_ts;

        return 0;
}
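
/*
 * Example with the defaults: touch_ts is in ~seconds (see
 * get_timestamp()), so once the watchdog thread has not updated it for
 * more than 20 "seconds" this returns the nonzero number of seconds
 * the CPU has been stuck, which the caller prints.
 */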

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
                 struct perf_sample_data *data,
                 struct pt_regs *regs)
{
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;

        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }

        /*
         * Check for a hardlockup by making sure our timer interrupt is
         * incrementing.  The timer interrupt should have fired multiple
         * times before we overflowed; if it hasn't, that is a good
         * indication the cpu is stuck.
         */
        if (is_hardlockup()) {
                int this_cpu = smp_processor_id();

                /* only print hardlockups once */
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;

                if (hardlockup_panic)
                        panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
                else
                        WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

                __this_cpu_write(hard_watchdog_warn, true);
                return;
        }

        __this_cpu_write(hard_watchdog_warn, false);
        return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }

                /* Clear the guest paused flag on watchdog reset */
                kvm_check_and_clear_guest_paused();
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /*
         * Check for a softlockup by making sure a high priority task
         * is being scheduled.  The task touches the watchdog to
         * indicate it is getting cpu time.  If it hasn't, some task is
         * hogging the cpu.
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /*
                 * If a virtual machine is stopped by the host it can look to
                 * the watchdog like a soft lockup, check to see if the host
                 * stopped the vm before we issue the warning
                 */
                if (kvm_check_and_clear_guest_paused())
                        return HRTIMER_RESTART;

                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;

                printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}
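
/*
 * Summary of the two-level scheme implemented by the function above:
 * the NMI/perf path verifies that this hrtimer keeps firing (if not,
 * interrupts are stuck off: hard lockup), while this hrtimer verifies
 * that the per-cpu watchdog thread keeps getting CPU time (if not, a
 * task is hogging the CPU with interrupts still on: soft lockup).
 */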

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
        struct sched_param param = { .sched_priority = prio };

        sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        /* kick off the timer for the hardlockup detector */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

        /* Enable the perf event */
        watchdog_nmi_enable(cpu);

        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED);

        /* initialize timestamp */
        watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
        __touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        watchdog_set_prio(SCHED_NORMAL, 0);
        hrtimer_cancel(hrtimer);
        /* disable the perf event */
        watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
        watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
        return __this_cpu_read(hrtimer_interrupts) !=
                __this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period (4 seconds by default) to
 * reset the softlockup timestamp.  If this gets delayed for more than
 * 2*watchdog_thresh seconds then the debug-printout triggers in
 * watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
        __this_cpu_write(soft_lockup_hrtimer_cnt,
                         __this_cpu_read(hrtimer_interrupts));
        __touch_watchdog();
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

        /* Try to register using hardware perf events */
        event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

        /* save cpu0 error for future comparison */
        if (cpu == 0 && IS_ERR(event))
                cpu0_err = PTR_ERR(event);

        if (!IS_ERR(event)) {
                /* only print for cpu0, or if the outcome differs from cpu0's */
                if (cpu == 0 || cpu0_err)
                        pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
                goto out_save;
        }

        /* skip displaying the same error again */
        if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
                return PTR_ERR(event);

        /* vary the KERN level based on the returned errno */
        if (PTR_ERR(event) == -EOPNOTSUPP)
                pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
        else if (PTR_ERR(event) == -ENOENT)
                pr_warning("disabled (cpu%i): hardware events not enabled\n",
                         cpu);
        else
                pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
                        cpu, PTR_ERR(event));
        return PTR_ERR(event);

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);
        }
        return;
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
        .store                  = &softlockup_watchdog,
        .thread_should_run      = watchdog_should_run,
        .thread_fn              = watchdog,
        .thread_comm            = "watchdog/%u",
        .setup                  = watchdog_enable,
        .cleanup                = watchdog_cleanup,
        .park                   = watchdog_disable,
        .unpark                 = watchdog_enable,
};
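
/*
 * Note on the smpboot API used here (summarized from kernel/smpboot.c
 * semantics): registering this descriptor spawns one "watchdog/%u"
 * thread per CPU; .setup runs when a thread first comes up, .park and
 * .unpark bracket CPU hotplug so the hrtimer and perf event follow
 * their CPU offline and online, and .cleanup runs on unregister.
 */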

static void restart_watchdog_hrtimer(void *info)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
        int ret;

        /*
         * No need to cancel and restart hrtimer if it is currently executing
         * because it will reprogram itself with the new period now.
         * We should never see it unqueued here because we are running per-cpu
         * with interrupts disabled.
         */
        ret = hrtimer_try_to_cancel(hrtimer);
        if (ret == 1)
                hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                                HRTIMER_MODE_REL_PINNED);
}

static void update_timers(int cpu)
{
        /*
         * Make sure the perf event counter adapts to the new sampling
         * period.  Updating the sampling period directly would be much
         * nicer, but we do not have an API for that now, so let's use
         * a big hammer.
         * The hrtimer will pick up the new period on its next tick, but
         * that might already be too late, so we have to restart the
         * timer as well.
         */
        watchdog_nmi_disable(cpu);
        smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
        watchdog_nmi_enable(cpu);
}

static void update_timers_all_cpus(void)
{
        int cpu;

        get_online_cpus();
        preempt_disable();
        for_each_online_cpu(cpu)
                update_timers(cpu);
        preempt_enable();
        put_online_cpus();
}

static int watchdog_enable_all_cpus(bool sample_period_changed)
{
        int err = 0;

        if (!watchdog_running) {
                err = smpboot_register_percpu_thread(&watchdog_threads);
                if (err)
                        pr_err("Failed to create watchdog threads, disabled\n");
                else
                        watchdog_running = 1;
        } else if (sample_period_changed) {
                update_timers_all_cpus();
        }

        return err;
}

/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_disable_all_cpus(void)
{
        if (watchdog_running) {
                watchdog_running = 0;
                smpboot_unregister_percpu_thread(&watchdog_threads);
        }
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog, watchdog_thresh
 */

int proc_dowatchdog(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old_thresh, old_enabled;
        static DEFINE_MUTEX(watchdog_proc_mutex);

        mutex_lock(&watchdog_proc_mutex);
        old_thresh = ACCESS_ONCE(watchdog_thresh);
        old_enabled = ACCESS_ONCE(watchdog_user_enabled);

        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (err || !write)
                goto out;

        set_sample_period();
        /*
         * The watchdog threads should not be enabled if they are
         * already running (or disabled if they are not); the
         * 'watchdog_running' check in watchdog_*_all_cpus() takes
         * care of this.
         */
        if (watchdog_user_enabled && watchdog_thresh)
                err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
        else
                watchdog_disable_all_cpus();

        /* Restore old values on failure */
        if (err) {
                watchdog_thresh = old_thresh;
                watchdog_user_enabled = old_enabled;
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        return err;
}
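
/*
 * Illustrative usage (assuming the usual wiring of this handler in
 * kernel/sysctl.c):
 *
 *      echo 20 > /proc/sys/kernel/watchdog_thresh  # re-arm, 40s soft threshold
 *      echo 0  > /proc/sys/kernel/watchdog         # tear the threads down
 *
 * A successful write that changes the threshold restarts the timers
 * with the new sample period.
 */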
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
        set_sample_period();

        if (watchdog_user_enabled)
                watchdog_enable_all_cpus(false);
}
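
/*
 * lockup_detector_init() is invoked once during early boot (via
 * kernel_init_freeable() in init/main.c in kernels of this vintage);
 * after that the detectors are driven entirely by the hrtimer/NMI
 * machinery, CPU hotplug parking, and the sysctl handler above.
 */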