/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>

/* Watchdog configuration */
static DEFINE_MUTEX(watchdog_proc_mutex);

int __read_mostly nmi_watchdog_enabled;

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED |
						NMI_WATCHDOG_ENABLED;
#else
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void hardlockup_detector_disable(void)
{
	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}
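
/*
 * Illustrative sketch (not part of this file): a paravirtualized guest
 * could opt out of hard lockup detection from its early setup code,
 * before the command line is parsed. The init function below is
 * hypothetical; on x86 the KVM guest setup code makes a comparable
 * call:
 *
 *	static void __init guest_early_init(void)
 *	{
 *		if (kvm_para_available())
 *			hardlockup_detector_disable();
 *	}
 */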

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
	else if (!strncmp(str, "1", 1))
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);

#endif

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
int __read_mostly soft_watchdog_enabled;
#endif

int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
#endif
struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;
/*
 * If a subsystem has a need to deactivate the watchdog temporarily, it
 * can use the suspend/resume interface to achieve this. The content of
 * the 'watchdog_suspended' variable reflects this state. Existing threads
 * are parked/unparked by the lockup_detector_{suspend|resume} functions
 * (see comment blocks pertaining to those functions for further details).
 *
 * 'watchdog_suspended' also prevents threads from being registered/started
 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
 * of 'watchdog_running' cannot change while the watchdog is deactivated
 * temporarily (see related code in 'proc' handlers).
 */
int __read_mostly watchdog_suspended;

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	return 0;
}
void __weak watchdog_nmi_disable(unsigned int cpu)
{
}
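
/*
 * Illustrative sketch: an architecture with its own NMI watchdog
 * (CONFIG_HAVE_NMI_WATCHDOG) can supply non-weak definitions that
 * arm/disarm its hardware counter instead. Both arch helpers below are
 * hypothetical:
 *
 *	int watchdog_nmi_enable(unsigned int cpu)
 *	{
 *		return arch_arm_nmi_counter(cpu);
 *	}
 *
 *	void watchdog_nmi_disable(unsigned int cpu)
 *	{
 *		arch_disarm_nmi_counter(cpu);
 *	}
 */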

/*
 * watchdog_nmi_reconfigure can be implemented to be notified after any
 * watchdog configuration change. The arch hardlockup watchdog should
 * respond to the following variables:
 * - nmi_watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 * - sysctl_hardlockup_all_cpu_backtrace
 * - hardlockup_panic
 * - watchdog_suspended
 */
void __weak watchdog_nmi_reconfigure(void)
{
}


#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)

atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);

static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We
 * therefore couple the thresholds with a factor: the soft threshold is
 * twice the hard threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}
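
/*
 * Worked example with the defaults: watchdog_thresh = 10 yields a soft
 * lockup threshold of 2 * 10 = 20 seconds and a sample_period of
 * 20 / 5 = 4 seconds, so the hrtimer fires roughly five times per soft
 * lockup window and two or three times per hard lockup window.
 */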

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}

void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
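
/*
 * Illustrative use: code that legitimately monopolizes a CPU for longer
 * than the soft lockup threshold - say, a long polling loop in early
 * boot - can touch the watchdog on each iteration to tell the detector
 * that the stall is intentional. The polling condition is hypothetical:
 *
 *	while (!hw_ready()) {
 *		cpu_relax();
 *		touch_softlockup_watchdog();
 *	}
 */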

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlockup check starts one cycle later
	 */
	for_each_watchdog_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

/* watchdog detector functions */
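/*
 * Called from the hardlockup detector in NMI/perf event context: if the
 * per-cpu hrtimer interrupt count has not advanced since the previous
 * check, timer interrupts have been stalled for an entire sample
 * window, which is taken as a hard lockup.
 */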
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (atomic_read(&watchdog_park_in_progress) != 0)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled. The task touches the watchdog to
	 * indicate it is getting cpu time. If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm. The second process prevents the quiet cycle
			 * and never gets reported. Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is already
			 * engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();

	/*
	 * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
	 * failure path. Check for failures that can occur asynchronously -
	 * for example, when CPUs are on-lined - and shut down the hardware
	 * perf event on each CPU accordingly.
	 *
	 * The only non-obvious place this bit can be cleared is through
	 * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a
	 * pr_info here would be too noisy as it would result in a message
	 * every few seconds if the hardlockup was disabled but the softlockup
	 * enabled.
	 */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		watchdog_nmi_disable(cpu);
}

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
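
/*
 * The smpboot infrastructure manages one "watchdog/N" kthread per CPU,
 * keeps threads outside the registered cpumask parked, and invokes the
 * park/unpark callbacks around CPU hotplug, so watchdog_disable() and
 * watchdog_enable() run on the affected CPU without any explicit
 * hotplug handling in this file.
 */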

/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
	int cpu, ret = 0;

	atomic_set(&watchdog_park_in_progress, 1);

	for_each_watchdog_cpu(cpu) {
		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
		if (ret)
			break;
	}

	atomic_set(&watchdog_park_in_progress, 0);

	return ret;
}

/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
	int cpu;

	for_each_watchdog_cpu(cpu)
		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}

static int update_watchdog_all_cpus(void)
{
	int ret;

	ret = watchdog_park_threads();
	if (ret)
		return ret;

	watchdog_unpark_threads();

	return 0;
}

static int watchdog_enable_all_cpus(void)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
							     &watchdog_cpumask);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else {
		/*
		 * Enable/disable the lockup detectors or
		 * change the sample period 'on the fly'.
		 */
		err = update_watchdog_all_cpus();

		if (err) {
			watchdog_disable_all_cpus();
			pr_err("Failed to update lockup detectors, disabled\n");
		}
	}

	if (err)
		watchdog_enabled = 0;

	return err;
}

static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

#ifdef CONFIG_SYSCTL
static int watchdog_update_cpus(void)
{
	return smpboot_update_cpumask_percpu_thread(
		    &watchdog_threads, &watchdog_cpumask);
}
#endif

#else /* SOFTLOCKUP */
static int watchdog_park_threads(void)
{
	return 0;
}

static void watchdog_unpark_threads(void)
{
}

static int watchdog_enable_all_cpus(void)
{
	return 0;
}

static void watchdog_disable_all_cpus(void)
{
}

#ifdef CONFIG_SYSCTL
static int watchdog_update_cpus(void)
{
	return 0;
}
#endif

static void set_sample_period(void)
{
}
#endif /* SOFTLOCKUP */

/*
 * Suspend the hard and soft lockup detector by parking the watchdog threads.
 */
int lockup_detector_suspend(void)
{
	int ret = 0;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);
	/*
	 * Multiple suspend requests can be active in parallel (counted by
	 * the 'watchdog_suspended' variable). If the watchdog threads are
	 * running, the first caller takes care that they will be parked.
	 * The state of 'watchdog_running' cannot change while a suspend
	 * request is active (see related code in 'proc' handlers).
	 */
	if (watchdog_running && !watchdog_suspended)
		ret = watchdog_park_threads();

	if (ret == 0)
		watchdog_suspended++;
	else {
		watchdog_disable_all_cpus();
		pr_err("Failed to suspend lockup detectors, disabled\n");
		watchdog_enabled = 0;
	}

	watchdog_nmi_reconfigure();

	mutex_unlock(&watchdog_proc_mutex);

	return ret;
}

/*
 * Resume the hard and soft lockup detector by unparking the watchdog threads.
 */
void lockup_detector_resume(void)
{
	mutex_lock(&watchdog_proc_mutex);

	watchdog_suspended--;
	/*
	 * The watchdog threads are unparked if they were previously running
	 * and if there is no more active suspend request.
	 */
	if (watchdog_running && !watchdog_suspended)
		watchdog_unpark_threads();

	watchdog_nmi_reconfigure();

	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
}
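
/*
 * Illustrative pairing for callers of the suspend/resume interface: a
 * subsystem that must keep the detectors quiet while reprogramming
 * hardware they depend on would do roughly:
 *
 *	if (lockup_detector_suspend() == 0) {
 *		... do the critical work ...
 *		lockup_detector_resume();
 *	}
 *
 * A failed suspend leaves the detectors disabled and must not be paired
 * with a resume, since lockup_detector_resume() unconditionally
 * decrements the suspend count.
 */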

#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	watchdog_nmi_reconfigure();

	return err;
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;

		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value. During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);

		/*
		 * Update the run state of the lockup detectors. There is _no_
		 * need to check the value returned by proc_watchdog_update()
		 * and to restore the previous value of 'watchdog_enabled' as
		 * both lockup detectors are disabled if proc_watchdog_update()
		 * returns an error.
		 */
		if (old == new)
			goto out;

		err = proc_watchdog_update();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}
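
/*
 * From userspace these handlers are exercised as ordinary integer
 * sysctls, e.g.:
 *
 *	echo 0 > /proc/sys/kernel/nmi_watchdog	 (clear NMI_WATCHDOG_ENABLED)
 *	echo 1 > /proc/sys/kernel/soft_watchdog	 (set SOFT_WATCHDOG_ENABLED)
 *	cat /proc/sys/kernel/watchdog		 (1 if either bit is set)
 */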

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period. Restore on failure.
	 */
	new = ACCESS_ONCE(watchdog_thresh);
	if (old == new)
		goto out;

	set_sample_period();
	err = proc_watchdog_update();
	if (err) {
		watchdog_thresh = old;
		set_sample_period();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}
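
/*
 * Example: "echo 20 > /proc/sys/kernel/watchdog_thresh" widens the hard
 * lockup window to 20 seconds and, via get_softlockup_thresh(), the
 * soft lockup window to 40 seconds; proc_watchdog_update() then parks
 * and unparks the watchdog threads so their hrtimers pick up the new
 * 8 second sample period.
 */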

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write) {
		/* Remove impossible cpus to keep sysctl output cleaner. */
		cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
			    cpu_possible_mask);

		if (watchdog_running) {
			/*
			 * Failure would be due to being unable to allocate
			 * a temporary cpumask, so we are likely not in a
			 * position to do much else to make things better.
			 */
			if (watchdog_update_cpus() != 0)
				pr_err("cpumask update failed\n");
		}

		watchdog_nmi_reconfigure();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}
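
/*
 * Example: "echo 0,2-4 > /proc/sys/kernel/watchdog_cpumask" restricts
 * the watchdog to CPUs 0 and 2-4; proc_do_large_bitmap() parses the
 * usual cpulist range syntax.
 */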

#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}