/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>

static DEFINE_MUTEX(watchdog_proc_mutex);

#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
#endif
static struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)

atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);

/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator of whether the threads exist.
 */
static int __read_mostly watchdog_running;
/*
 * If a subsystem has a need to deactivate the watchdog temporarily, it
 * can use the suspend/resume interface to achieve this. The content of
 * the 'watchdog_suspended' variable reflects this state. Existing threads
 * are parked/unparked by the lockup_detector_{suspend|resume} functions
 * (see comment blocks pertaining to those functions for further details).
 *
 * 'watchdog_suspended' also prevents threads from being registered/started
 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
 * of 'watchdog_running' cannot change while the watchdog is deactivated
 * temporarily (see related code in 'proc' handlers).
 */
static int __read_mostly watchdog_suspended;

static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;	/* 2^30 ~= 10^9 */
}

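/*
 * Worked example: with the default watchdog_thresh of 10s, the soft
 * threshold is 2 * 10 = 20s and the sample period below works out to
 * 20s / 5 = 4s.
 */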
static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns. The divide by 5 is
	 * to give the hrtimer several chances (two or three with the
	 * current relation between the soft and hard thresholds) to
	 * increment before the hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}

void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * This is done without locking. Do we care if a 0 races with a
	 * timestamp? All it means is that the softlockup check starts one
	 * cycle later.
	 */
	for_each_watchdog_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

/* watchdog detector functions */
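/*
 * Called from the hardlockup detector's NMI path: hrtimer_interrupts is
 * incremented by watchdog_timer_fn(), so if it has not moved since the
 * previous check, this CPU has not serviced the watchdog hrtimer for a
 * full sample window and is considered hard-locked.
 */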
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}

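/*
 * Returns how many seconds ago the watchdog was last touched, but only
 * once that exceeds the softlockup threshold; returns 0 while the stall
 * is still within bounds or the soft watchdog is disabled.
 */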
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

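/* Called from the watchdog hrtimer; this count is what is_hardlockup() checks. */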
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

/*
 * These two functions are mostly architecture specific, so we define
 * them as weak here.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	return 0;
}
void __weak watchdog_nmi_disable(unsigned int cpu)
{
}

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);

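/*
 * How the softlockup detector works: the per-cpu hrtimer below fires
 * every sample_period and wakes the per-cpu SCHED_FIFO watchdog thread,
 * which refreshes watchdog_touch_ts. If the timestamp is older than the
 * soft threshold when the timer next fires, even the highest priority
 * task was starved of cpu time and a soft lockup is reported.
 */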
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (atomic_read(&watchdog_park_in_progress) != 0)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup. This is done by making sure a high
	 * priority task is being scheduled. The task touches the watchdog
	 * to indicate it is getting cpu time. If it hasn't, that is a good
	 * indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup. Check to see if the host
		 * stopped the vm before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm. The second process prevents the quiet cycle
			 * and never gets reported. Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Prevent multiple soft-lockup reports if one cpu is
			 * already engaged in dumping cpu back traces.
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Avoid generating two back traces for current
			 * given that one is already made above.
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

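/* Run the watchdog thread only if the hrtimer has ticked since its last run. */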
static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period (4 seconds by default) to
 * reset the softlockup timestamp. If this gets delayed for more than
 * 2*watchdog_thresh seconds then the debug-printout triggers in
 * watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();

	/*
	 * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
	 * failure path. Check for failures that can occur asynchronously -
	 * for example, when CPUs are on-lined - and shut down the hardware
	 * perf event on each CPU accordingly.
	 *
	 * The only non-obvious place this bit can be cleared is through
	 * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a
	 * pr_info here would be too noisy as it would result in a message
	 * every few seconds if the hardlockup was disabled but the softlockup
	 * enabled.
	 */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		watchdog_nmi_disable(cpu);
}

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

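/*
 * Note: smpboot runs one such thread per cpu in 'watchdog_cpumask'.
 * The .park/.unpark callbacks are used both by the suspend/update paths
 * below and by CPU hotplug, so an offlined cpu gets its hrtimer and
 * perf event torn down via watchdog_disable().
 */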
/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
	int cpu, ret = 0;

	atomic_set(&watchdog_park_in_progress, 1);

	for_each_watchdog_cpu(cpu) {
		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
		if (ret)
			break;
	}

	atomic_set(&watchdog_park_in_progress, 0);

	return ret;
}

/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
	int cpu;

	for_each_watchdog_cpu(cpu)
		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}

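/*
 * Typical use of the suspend/resume pair (an illustrative sketch, not
 * taken from an in-tree caller):
 *
 *	err = lockup_detector_suspend();
 *	if (err)
 *		return err;
 *	... do work that must not trip the lockup detectors ...
 *	lockup_detector_resume();
 */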
/*
 * Suspend the hard and soft lockup detector by parking the watchdog threads.
 */
int lockup_detector_suspend(void)
{
	int ret = 0;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);
	/*
	 * Multiple suspend requests can be active in parallel (counted by
	 * the 'watchdog_suspended' variable). If the watchdog threads are
	 * running, the first caller takes care that they will be parked.
	 * The state of 'watchdog_running' cannot change while a suspend
	 * request is active (see related code in 'proc' handlers).
	 */
	if (watchdog_running && !watchdog_suspended)
		ret = watchdog_park_threads();

	if (ret == 0)
		watchdog_suspended++;
	else {
		watchdog_disable_all_cpus();
		pr_err("Failed to suspend lockup detectors, disabled\n");
		watchdog_enabled = 0;
	}

	mutex_unlock(&watchdog_proc_mutex);

	return ret;
}

/*
 * Resume the hard and soft lockup detector by unparking the watchdog threads.
 */
void lockup_detector_resume(void)
{
	mutex_lock(&watchdog_proc_mutex);

	watchdog_suspended--;
	/*
	 * The watchdog threads are unparked if they were previously running
	 * and if there is no more active suspend request.
	 */
	if (watchdog_running && !watchdog_suspended)
		watchdog_unpark_threads();

	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
}

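/*
 * Apply an updated configuration by cycling the watchdog threads through
 * park/unpark: parking invokes watchdog_disable() and unparking invokes
 * watchdog_enable(), which re-arms the hrtimer with the current
 * sample_period.
 */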
static int update_watchdog_all_cpus(void)
{
	int ret;

	ret = watchdog_park_threads();
	if (ret)
		return ret;

	watchdog_unpark_threads();

	return 0;
}

static int watchdog_enable_all_cpus(void)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
							     &watchdog_cpumask);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else {
		/*
		 * Enable/disable the lockup detectors or
		 * change the sample period 'on the fly'.
		 */
		err = update_watchdog_all_cpus();

		if (err) {
			watchdog_disable_all_cpus();
			pr_err("Failed to update lockup detectors, disabled\n");
		}
	}

	if (err)
		watchdog_enabled = 0;

	return err;
}

static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	return err;
}

/*
 * common function for the watchdog, nmi_watchdog and soft_watchdog parameters
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;

		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value. During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);

		/*
		 * Update the run state of the lockup detectors. There is _no_
		 * need to check the value returned by proc_watchdog_update()
		 * and to restore the previous value of 'watchdog_enabled' as
		 * both lockup detectors are disabled if proc_watchdog_update()
		 * returns an error.
		 */
		if (old == new)
			goto out;

		err = proc_watchdog_update();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}

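/*
 * The handlers below back the /proc/sys/kernel interface. For example
 * (illustrative):
 *
 *	echo 0 > /proc/sys/kernel/nmi_watchdog		# hard detector off
 *	echo 1 > /proc/sys/kernel/soft_watchdog	# soft detector on
 */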
/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period. Restore on failure.
	 */
	new = ACCESS_ONCE(watchdog_thresh);
	if (old == new)
		goto out;

	set_sample_period();
	err = proc_watchdog_update();
	if (err) {
		watchdog_thresh = old;
		set_sample_period();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
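/*
 * For example (illustrative), to restrict the watchdog to cpus 0-3:
 *
 *	echo 0-3 > /proc/sys/kernel/watchdog_cpumask
 */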
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write) {
		/* Remove impossible cpus to keep sysctl output cleaner. */
		cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
			    cpu_possible_mask);

		if (watchdog_running) {
			/*
			 * Failure would be due to being unable to allocate
			 * a temporary cpumask, so we are likely not in a
			 * position to do much else to make things better.
			 */
			if (smpboot_update_cpumask_percpu_thread(
				    &watchdog_threads, &watchdog_cpumask) != 0)
				pr_err("cpumask update failed\n");
		}
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}

#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}