/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>

/* Watchdog configuration */
static DEFINE_MUTEX(watchdog_proc_mutex);

int __read_mostly nmi_watchdog_enabled;

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED |
						NMI_WATCHDOG_ENABLED;
#else
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void hardlockup_detector_disable(void)
{
	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
	else if (!strncmp(str, "1", 1))
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
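
/*
 * Illustrative note (not in the original file): the handler above maps the
 * "nmi_watchdog=" boot parameter onto the flags, e.g. booting with
 * "nmi_watchdog=panic" sets hardlockup_panic, while "nmi_watchdog=0"
 * clears NMI_WATCHDOG_ENABLED and thus disables the hard lockup detector.
 */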

#endif

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
int __read_mostly soft_watchdog_enabled;
#endif

int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
#endif
struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	return 0;
}
void __weak watchdog_nmi_disable(unsigned int cpu)
{
}

/*
 * watchdog_nmi_reconfigure can be implemented to be notified after any
 * watchdog configuration change. The arch hardlockup watchdog should
 * respond to the following variables:
 * - nmi_watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 * - sysctl_hardlockup_all_cpu_backtrace
 * - hardlockup_panic
 */
void __weak watchdog_nmi_reconfigure(void)
{
}

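/*
 * Illustrative sketch (not part of this file, helper names hypothetical):
 * an architecture with its own hard lockup detector would provide strong
 * definitions of the weak hooks above in its arch code, e.g.:
 *
 *	int watchdog_nmi_enable(unsigned int cpu)
 *	{
 *		return arch_start_nmi_watchdog_on(cpu);	// arch-specific
 *	}
 *
 *	void watchdog_nmi_disable(unsigned int cpu)
 *	{
 *		arch_stop_nmi_watchdog_on(cpu);		// arch-specific
 *	}
 *
 *	void watchdog_nmi_reconfigure(void)
 *	{
 *		// re-read watchdog_thresh, watchdog_cpumask, etc. and
 *		// reprogram the hardware accordingly
 *	}
 */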

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)

atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);

static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}
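
/*
 * Worked example (illustrative, not in the original file): with
 * running_clock() == 5,000,000,000 ns (5 s), 5000000000 >> 30 == 4, i.e.
 * about 4.66 units of 1.074 s each. The small error is irrelevant here;
 * only the relative staleness of the per-cpu touch timestamp matters.
 */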

static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}
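
/*
 * Worked example (illustrative, not in the original file): with the default
 * watchdog_thresh of 10 s, get_softlockup_thresh() returns 20 s and
 * sample_period becomes 20 * (1e9 / 5) ns = 4e9 ns = 4 s, so the per-cpu
 * hrtimer below fires every 4 seconds.
 */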

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}

void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
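
/*
 * Illustrative sketch (not part of this file): code that legitimately keeps
 * a CPU busy for a long time can use the export above to suppress false
 * positives, e.g. (erase_in_progress() is a hypothetical driver helper):
 *
 *	while (erase_in_progress(chip)) {
 *		touch_softlockup_watchdog();	// reset this CPU's timestamp
 *		cpu_relax();
 *	}
 */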

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlockup check starts one cycle later
	 */
	for_each_watchdog_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

/* watchdog detector functions */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}
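
/*
 * Illustrative sketch (not part of this file): the perf-based hard lockup
 * detector (kernel/watchdog_hld.c) calls is_hardlockup() from its NMI
 * callback, roughly:
 *
 *	// The NMI fires on a perf cycles event. The hrtimer above should
 *	// have incremented hrtimer_interrupts in the meantime; if it has
 *	// not, interrupts have been off too long -> hard lockup.
 *	if (is_hardlockup()) {
 *		pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", cpu);
 *		...
 *	}
 */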

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled ||
	    atomic_read(&watchdog_park_in_progress) != 0)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled. The task touches the watchdog to
	 * indicate it is getting cpu time. If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm. The second process prevents the quiet cycle
			 * and never gets reported. Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is
			 * already engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();

	/*
	 * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
	 * failure path. Check for failures that can occur asynchronously -
	 * for example, when CPUs are on-lined - and shut down the hardware
	 * perf event on each CPU accordingly.
	 *
	 * The only non-obvious place this bit can be cleared is through
	 * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a
	 * pr_info here would be too noisy as it would result in a message
	 * every few seconds if the hardlockup was disabled but the softlockup
	 * enabled.
	 */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		watchdog_nmi_disable(cpu);
}

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
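
/*
 * Descriptive note (added for clarity): smpboot_register_percpu_thread()
 * spawns one "watchdog/%u" kthread per CPU from this descriptor. ->setup
 * runs when a thread is created, ->park/->unpark run when it is parked for
 * a configuration change or CPU hotplug, and ->cleanup runs when a CPU goes
 * away. Each iteration runs ->thread_fn only when ->thread_should_run
 * reports that the hrtimer has ticked since the last pass.
 */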

/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
	int cpu, ret = 0;

	atomic_set(&watchdog_park_in_progress, 1);

	for_each_watchdog_cpu(cpu) {
		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
		if (ret)
			break;
	}

	atomic_set(&watchdog_park_in_progress, 0);

	return ret;
}

/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
	int cpu;

	for_each_watchdog_cpu(cpu)
		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}

static int update_watchdog_all_cpus(void)
{
	int ret;

	ret = watchdog_park_threads();
	if (ret)
		return ret;

	watchdog_unpark_threads();

	return 0;
}

static int watchdog_enable_all_cpus(void)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
							     &watchdog_cpumask);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else {
		/*
		 * Enable/disable the lockup detectors or
		 * change the sample period 'on the fly'.
		 */
		err = update_watchdog_all_cpus();

		if (err) {
			watchdog_disable_all_cpus();
			pr_err("Failed to update lockup detectors, disabled\n");
		}
	}

	if (err)
		watchdog_enabled = 0;

	return err;
}

static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

#ifdef CONFIG_SYSCTL
static int watchdog_update_cpus(void)
{
	return smpboot_update_cpumask_percpu_thread(
		&watchdog_threads, &watchdog_cpumask);
}
#endif

#else /* SOFTLOCKUP */
static int watchdog_park_threads(void)
{
	return 0;
}

static void watchdog_unpark_threads(void)
{
}

static int watchdog_enable_all_cpus(void)
{
	return 0;
}

static void watchdog_disable_all_cpus(void)
{
}

#ifdef CONFIG_SYSCTL
static int watchdog_update_cpus(void)
{
	return 0;
}
#endif

static void set_sample_period(void)
{
}
#endif /* SOFTLOCKUP */

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	watchdog_nmi_reconfigure();

	return err;
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;

		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value. During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);

		/*
		 * Update the run state of the lockup detectors. There is _no_
		 * need to check the value returned by proc_watchdog_update()
		 * and to restore the previous value of 'watchdog_enabled' as
		 * both lockup detectors are disabled if proc_watchdog_update()
		 * returns an error.
		 */
		if (old == new)
			goto out;

		err = proc_watchdog_update();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
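
/*
 * Illustrative user-space sketch (not part of this file): the knobs above
 * are plain sysctl files, so disabling just the NMI watchdog amounts to
 * writing "0" to /proc/sys/kernel/nmi_watchdog, e.g.:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/kernel/nmi_watchdog", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("0", f);		// clears NMI_WATCHDOG_ENABLED
 *		return fclose(f) ? 1 : 0;
 *	}
 *
 * watchdog_cpumask works the same way, taking a cpu list such as "0-3".
 */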

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period. Restore on failure.
	 */
	new = ACCESS_ONCE(watchdog_thresh);
	if (old == new)
		goto out;

	set_sample_period();
	err = proc_watchdog_update();
	if (err) {
		watchdog_thresh = old;
		set_sample_period();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write) {
		/* Remove impossible cpus to keep sysctl output cleaner. */
		cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
			    cpu_possible_mask);

		if (watchdog_running) {
			/*
			 * Failure would be due to being unable to allocate
			 * a temporary cpumask, so we are likely not in a
			 * position to do much else to make things better.
			 */
			if (watchdog_update_cpus() != 0)
				pr_err("cpumask update failed\n");
		}

		watchdog_nmi_reconfigure();
	}

	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}

#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}
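
/*
 * Descriptive note (added for clarity): lockup_detector_init() is invoked
 * once during boot from kernel_init_freeable() in init/main.c; watchdog
 * threads for CPUs that come online later are created on demand by the
 * smpboot hotplug infrastructure behind watchdog_threads above.
 */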