/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>

/* Watchdog configuration */
static DEFINE_MUTEX(watchdog_proc_mutex);

int __read_mostly nmi_watchdog_enabled;

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED |
					       NMI_WATCHDOG_ENABLED;
#else
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void hardlockup_detector_disable(void)
{
	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
	else if (!strncmp(str, "1", 1))
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
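
/*
 * Illustrative usage (grounded in the handler above): the "nmi_watchdog="
 * boot parameter accepts e.g.
 *
 *	nmi_watchdog=panic	# panic the machine on a hard lockup
 *	nmi_watchdog=nopanic	# warn only
 *	nmi_watchdog=0		# disable the hard lockup detector
 */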

#endif /* CONFIG_HARDLOCKUP_DETECTOR */

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
int __read_mostly soft_watchdog_enabled;
#endif

int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
#endif

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;
/*
 * If a subsystem has a need to deactivate the watchdog temporarily, it
 * can use the suspend/resume interface to achieve this. The content of
 * the 'watchdog_suspended' variable reflects this state. Existing threads
 * are parked/unparked by the lockup_detector_{suspend|resume} functions
 * (see comment blocks pertaining to those functions for further details).
 *
 * 'watchdog_suspended' also prevents threads from being registered/started
 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
 * of 'watchdog_running' cannot change while the watchdog is deactivated
 * temporarily (see related code in 'proc' handlers).
 */
int __read_mostly watchdog_suspended;

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
}

/*
 * watchdog_nmi_reconfigure can be implemented to be notified after any
 * watchdog configuration change. The arch hardlockup watchdog should
 * respond to the following variables:
 * - nmi_watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 * - sysctl_hardlockup_all_cpu_backtrace
 * - hardlockup_panic
 * - watchdog_suspended
 */
void __weak watchdog_nmi_reconfigure(void)
{
}
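
/*
 * A minimal sketch (illustrative only, not part of this file) of how an
 * architecture might hook the weak functions above; arch_nmi_timer_start()
 * and arch_nmi_timer_stop() are hypothetical helpers:
 *
 *	int watchdog_nmi_enable(unsigned int cpu)
 *	{
 *		return arch_nmi_timer_start(cpu, watchdog_thresh);
 *	}
 *
 *	void watchdog_nmi_disable(unsigned int cpu)
 *	{
 *		arch_nmi_timer_stop(cpu);
 *	}
 */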

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)

atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);

static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif
#endif /* CONFIG_SMP */

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we couple
 * the thresholds with a factor: we make the soft threshold twice the amount of
 * time the hard threshold is.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
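
/*
 * Worked example: shifting right by 30 divides by 2^30 = 1073741824
 * rather than 10^9, so each "second" returned above is really ~1.074s.
 * For lockup detection that ~7% error is harmless and far cheaper than
 * a 64-bit divide on every call.
 */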

static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
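
/*
 * Worked example with the defaults: watchdog_thresh = 10 gives a soft
 * threshold of 20s, so sample_period = 20 * (NSEC_PER_SEC / 5) = 4s.
 * The per-CPU hrtimer thus fires five times per 20s soft-lockup window.
 */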

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}

void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
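
/*
 * Typical caller pattern (illustrative; more_work()/do_chunk_of_work()
 * are hypothetical): long-running kernel loops that legitimately hold
 * the CPU call this periodically so the detector does not flag them:
 *
 *	while (more_work()) {
 *		do_chunk_of_work();
 *		touch_softlockup_watchdog();
 *	}
 */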

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlock check starts one cycle later
	 */
	for_each_watchdog_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

/* watchdog detector functions */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (atomic_read(&watchdog_park_in_progress) != 0)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is already
			 * engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();

	/*
	 * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
	 * failure path. Check for failures that can occur asynchronously -
	 * for example, when CPUs are on-lined - and shut down the hardware
	 * perf event on each CPU accordingly.
	 *
	 * The only non-obvious place this bit can be cleared is through
	 * watchdog_nmi_enable(), so a pr_info() is placed there.  Placing a
	 * pr_info here would be too noisy as it would result in a message
	 * every few seconds if the hardlockup was disabled but the softlockup
	 * enabled.
	 */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		watchdog_nmi_disable(cpu);
}

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
	int cpu, ret = 0;

	atomic_set(&watchdog_park_in_progress, 1);

	for_each_watchdog_cpu(cpu) {
		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
		if (ret)
			break;
	}

	atomic_set(&watchdog_park_in_progress, 0);

	return ret;
}

/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
	int cpu;

	for_each_watchdog_cpu(cpu)
		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}

static int update_watchdog_all_cpus(void)
{
	int ret;

	ret = watchdog_park_threads();
	if (ret)
		return ret;

	watchdog_unpark_threads();

	return 0;
}

static int watchdog_enable_all_cpus(void)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
							     &watchdog_cpumask);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else {
		/*
		 * Enable/disable the lockup detectors or
		 * change the sample period 'on the fly'.
		 */
		err = update_watchdog_all_cpus();

		if (err) {
			watchdog_disable_all_cpus();
			pr_err("Failed to update lockup detectors, disabled\n");
		}
	}

	if (err)
		watchdog_enabled = 0;

	return err;
}

static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

#ifdef CONFIG_SYSCTL
static int watchdog_update_cpus(void)
{
	return smpboot_update_cpumask_percpu_thread(
		    &watchdog_threads, &watchdog_cpumask);
}
#endif

#else /* SOFTLOCKUP */
static int watchdog_park_threads(void)
{
	return 0;
}

static void watchdog_unpark_threads(void)
{
}

static int watchdog_enable_all_cpus(void)
{
	return 0;
}

static void watchdog_disable_all_cpus(void)
{
}

#ifdef CONFIG_SYSCTL
static int watchdog_update_cpus(void)
{
	return 0;
}
#endif

static void set_sample_period(void)
{
}
#endif /* SOFTLOCKUP */

/*
 * Suspend the hard and soft lockup detector by parking the watchdog threads.
 */
int lockup_detector_suspend(void)
{
	int ret = 0;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);
	/*
	 * Multiple suspend requests can be active in parallel (counted by
	 * the 'watchdog_suspended' variable). If the watchdog threads are
	 * running, the first caller takes care that they will be parked.
	 * The state of 'watchdog_running' cannot change while a suspend
	 * request is active (see related code in 'proc' handlers).
	 */
	if (watchdog_running && !watchdog_suspended)
		ret = watchdog_park_threads();

	if (ret == 0)
		watchdog_suspended++;
	else {
		watchdog_disable_all_cpus();
		pr_err("Failed to suspend lockup detectors, disabled\n");
		watchdog_enabled = 0;
	}

	watchdog_nmi_reconfigure();

	mutex_unlock(&watchdog_proc_mutex);

	return ret;
}

/*
 * Resume the hard and soft lockup detector by unparking the watchdog threads.
 */
void lockup_detector_resume(void)
{
	mutex_lock(&watchdog_proc_mutex);

	watchdog_suspended--;
	/*
	 * The watchdog threads are unparked if they were previously running
	 * and if there is no more active suspend request.
	 */
	if (watchdog_running && !watchdog_suspended)
		watchdog_unpark_threads();

	watchdog_nmi_reconfigure();

	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
}
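
/*
 * A minimal caller sketch (illustrative; do_long_atomic_work() is
 * hypothetical). Suspend requests nest, so concurrent callers are safe:
 *
 *	if (!lockup_detector_suspend()) {
 *		do_long_atomic_work();
 *		lockup_detector_resume();
 *	}
 */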

#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	watchdog_nmi_reconfigure();

	return err;
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;

		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value. During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);

		/*
		 * Update the run state of the lockup detectors. There is _no_
		 * need to check the value returned by proc_watchdog_update()
		 * and to restore the previous value of 'watchdog_enabled' as
		 * both lockup detectors are disabled if proc_watchdog_update()
		 * returns an error.
		 */
		if (old == new)
			goto out;

		err = proc_watchdog_update();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
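
/*
 * Illustrative usage from userspace, assuming these sysctls are
 * compiled in:
 *
 *	echo 0 > /proc/sys/kernel/watchdog	# both detectors off
 *	echo 1 > /proc/sys/kernel/nmi_watchdog	# hard detector on
 *	cat /proc/sys/kernel/soft_watchdog	# read soft detector state
 */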

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period. Restore on failure.
	 */
	new = ACCESS_ONCE(watchdog_thresh);
	if (old == new)
		goto out;

	set_sample_period();
	err = proc_watchdog_update();
	if (err) {
		watchdog_thresh = old;
		set_sample_period();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}
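
/*
 * Worked example: writing a new threshold re-derives the timer period
 * via set_sample_period(), e.g.
 *
 *	echo 20 > /proc/sys/kernel/watchdog_thresh
 *
 * widens the hard-lockup window to 20s, the soft-lockup window to 40s,
 * and the per-CPU hrtimer period to 40s / 5 = 8s.
 */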

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write) {
		/* Remove impossible cpus to keep sysctl output cleaner. */
		cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
			    cpu_possible_mask);

		if (watchdog_running) {
			/*
			 * Failure would be due to being unable to allocate
			 * a temporary cpumask, so we are likely not in a
			 * position to do much else to make things better.
			 */
			if (watchdog_update_cpus() != 0)
				pr_err("cpumask update failed\n");
		}

		watchdog_nmi_reconfigure();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}
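
/*
 * Illustrative usage, assuming standard cpulist syntax:
 *
 *	echo 0-3 > /proc/sys/kernel/watchdog_cpumask	# watch CPUs 0-3 only
 *
 * Impossible CPUs are dropped from the mask by the handler above.
 */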

#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}