/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * is equal to zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT	0
#define SOFT_WATCHDOG_ENABLED_BIT	1
#define NMI_WATCHDOG_ENABLED		(1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED		(1 << SOFT_WATCHDOG_ENABLED_BIT)
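
/*
 * For example: with both detectors active, 'watchdog_enabled' is
 * NMI_WATCHDOG_ENABLED | SOFT_WATCHDOG_ENABLED == 0x3; clearing
 * NMI_WATCHDOG_ENABLED_BIT leaves only the soft lockup detector enabled.
 */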

#ifdef CONFIG_HARDLOCKUP_DETECTOR
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#endif

static int __read_mostly watchdog_running;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
static unsigned long soft_lockup_nmi_warn;

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static bool hardlockup_detector_enabled = true;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void watchdog_enable_hardlockup_detector(bool val)
{
	hardlockup_detector_enabled = val;
}

bool watchdog_hardlockup_detector_is_enabled(void)
{
	return hardlockup_detector_enabled;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1) || !strncmp(str, "2", 1)) {
		/*
		 * Setting 'nmi_watchdog=1' or 'nmi_watchdog=2' (legacy option)
		 * has the same effect.
		 */
		watchdog_user_enabled = 1;
		watchdog_enable_hardlockup_detector(true);
	}
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/* */
#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. The two
 * thresholds are therefore coupled by a factor: the soft threshold is twice
 * the hard threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
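/* e.g. with the default watchdog_thresh = 10, get_softlockup_thresh() == 20s */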

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}
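/*
 * Worked example: after 60s of uptime, running_clock() is ~6.0e10 ns, and
 * 6.0e10 >> 30 == 55. Timestamps thus run ~7% slow, which is fine for a
 * detector that only needs "seconds, approximately".
 */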

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.
	 * The divide by 5 gives the hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
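/*
 * With the defaults: get_softlockup_thresh() == 20, so sample_period is
 * 20 * (1e9 / 5) = 4e9 ns, i.e. the watchdog hrtimer fires every 4 seconds.
 */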

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * This is done locklessly. Do we care if a 0 races with a
	 * timestamp? All it means is that the softlockup check starts
	 * one cycle later.
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled. If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}
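/*
 * For instance, with the defaults: if the watchdog thread last touched the
 * timestamp at t=100s and it is now t=123s, then 123 > 100 + 20 holds and
 * is_softlockup() returns 23 - the stall duration printed by the warning.
 */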

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
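/*
 * Note: .sample_period is filled in later by watchdog_nmi_enable() via
 * hw_nmi_get_sample_period(), sized so that the cycle counter overflows
 * (and the NMI callback below runs) roughly once per watchdog_thresh
 * seconds.
 */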

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/*
	 * Check for a hardlockup. This is done by making sure our timer
	 * interrupt is incrementing. The timer interrupt should have
	 * fired multiple times before this perf event overflowed. If it
	 * hasn't, that is a good indication the cpu is stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d",
			      this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d",
			     this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup. This is done by making sure a high
	 * priority task is being scheduled. That task touches the
	 * watchdog to indicate it is getting cpu time. If it hasn't,
	 * that is a good indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup; check whether the host
		 * stopped the vm before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm. The second process prevents the quiet cycle
			 * and never gets reported. Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Prevent multiple soft-lockup reports if one cpu is
			 * already engaged in dumping cpu back traces.
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			 smp_processor_id(), duration,
			 current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Avoid generating two back traces for current
			 * given that one is already made above.
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

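/*
 * An illustrative console line from the pr_emerg() above (values vary):
 *   BUG: soft lockup - CPU#3 stuck for 23s! [kworker/3:0:27]
 */
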
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period (4 seconds by default) to
 * reset the softlockup timestamp. If this gets delayed for more than
 * 2*watchdog_thresh seconds then the debug-printout triggers in
 * watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}

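/*
 * To recap the softlockup mechanism: watchdog_timer_fn() wakes this thread
 * every sample_period, and the thread runs at MAX_RT_PRIO - 1 (SCHED_FIFO).
 * If even a near-top-priority task cannot get onto the CPU, the timestamp
 * goes stale and the hrtimer callback reports the stall.
 */
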
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/*
	 * Some kernels need to default hard lockup detection to
	 * 'disabled', for example a guest on a hypervisor.
	 */
	if (!watchdog_hardlockup_detector_is_enabled()) {
		event = ERR_PTR(-ENOENT);
		goto handle_err;
	}

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

handle_err:
	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or when the outcome differs from cpu0's */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warn("disabled (cpu%i): hardware events not enabled\n",
			cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
		       cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	if (cpu == 0) {
		/* watchdog_nmi_enable() expects this to be zero initially. */
		cpu0_err = 0;
	}
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

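/*
 * Registering this descriptor spawns one "watchdog/%u" kthread per online
 * CPU; the smpboot core invokes .setup/.park/.unpark on the owning CPU as
 * CPUs come and go, so watchdog_enable()/watchdog_disable() above always
 * operate on that CPU's own per-cpu state.
 */
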
static void restart_watchdog_hrtimer(void *info)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
	int ret;

	/*
	 * No need to cancel and restart hrtimer if it is currently executing
	 * because it will reprogram itself with the new period now.
	 * We should never see it unqueued here because we are running per-cpu
	 * with interrupts disabled.
	 */
	ret = hrtimer_try_to_cancel(hrtimer);
	if (ret == 1)
		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
			      HRTIMER_MODE_REL_PINNED);
}

static void update_timers(int cpu)
{
	/*
	 * Make sure the perf event counter adopts the new sampling
	 * period. Updating the period directly would be much nicer, but
	 * we do not have an API for that now, so use a big hammer:
	 * disable and re-enable the event. The hrtimer would pick up the
	 * new period on its next tick, but that might already be too
	 * late, so restart the timer as well.
	 */
	watchdog_nmi_disable(cpu);
	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
	watchdog_nmi_enable(cpu);
}

static void update_timers_all_cpus(void)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		update_timers(cpu);
	put_online_cpus();
}

static int watchdog_enable_all_cpus(bool sample_period_changed)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread(&watchdog_threads);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else if (sample_period_changed) {
		update_timers_all_cpus();
	}

	return err;
}

/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus(true);
	else
		watchdog_disable_all_cpus();

	return err;
}

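/* Serializes all updates of the watchdog parameters made via /proc */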
static DEFINE_MUTEX(watchdog_proc_mutex);

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog, watchdog_thresh
 */

int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old_thresh, old_enabled;
	bool old_hardlockup;

	mutex_lock(&watchdog_proc_mutex);
	old_thresh = ACCESS_ONCE(watchdog_thresh);
	old_enabled = ACCESS_ONCE(watchdog_user_enabled);
	old_hardlockup = watchdog_hardlockup_detector_is_enabled();

	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (err || !write)
		goto out;

	set_sample_period();
	/*
	 * Watchdog threads won't be started if they are already
	 * running. The 'watchdog_running' check in the
	 * watchdog_*_all_cpus() functions takes care of this.
	 */
	if (watchdog_user_enabled && watchdog_thresh) {
		/*
		 * Prevent a change in watchdog_thresh accidentally overriding
		 * the enablement of the hardlockup detector.
		 */
		if (watchdog_user_enabled != old_enabled)
			watchdog_enable_hardlockup_detector(true);
		err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
	} else
		watchdog_disable_all_cpus();

	/* Restore old values on failure */
	if (err) {
		watchdog_thresh = old_thresh;
		watchdog_user_enabled = old_enabled;
		watchdog_enable_hardlockup_detector(old_hardlockup);
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}
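/*
 * Illustrative usage from userspace (both knobs land in proc_dowatchdog()):
 *   echo 0  > /proc/sys/kernel/nmi_watchdog    # watchdog_user_enabled = 0,
 *                                              # stops the watchdog threads
 *   echo 30 > /proc/sys/kernel/watchdog_thresh # hard threshold 30s, soft 60s
 */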
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

	if (watchdog_user_enabled)
		watchdog_enable_all_cpus(false);
}