/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * This code detects hard lockups: incidents where the kernel on a CPU
 * does not respond to anything except NMI.
 *
 * Note: Most of this code is borrowed heavily from softlockup.c,
 * so thanks to Ingo for the initial implementation.
 * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks
 * to those contributors as well.
 */
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>
int watchdog_enabled;
int __read_mostly softlockup_thresh = 60;
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
#ifdef CONFIG_PERF_EVENTS_NMI
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
static int __read_mostly did_panic;
static int __initdata no_watchdog;
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_PERF_EVENTS_NMI
static int hardlockup_panic;

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
static int __init nowatchdog_setup(char *str)
{
	no_watchdog = 1;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);
/* deprecated; behaves like "nowatchdog" */
static int __init nosoftlockup_setup(char *str)
{
	no_watchdog = 1;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * a cheap shift by 30 is close enough.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL;	/* 2^30 ~= 10^9 */
}
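/*
 * Example: a cpu_clock() value of 90,000,000,000 ns (90 s) shifted
 * right by 30 gives 83, i.e. units of ~1.074 s rather than exact
 * seconds; plenty of accuracy for a multi-second lockup threshold.
 */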
static unsigned long get_sample_period(void)
{
	/*
	 * convert softlockup_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer 5 chances to
	 * increment before the hardlockup detector generates
	 * a warning
	 */
	return softlockup_thresh / 5 * NSEC_PER_SEC;
}
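/*
 * With the default softlockup_thresh of 60 this yields
 * 60 / 5 * NSEC_PER_SEC = 12,000,000,000 ns: the hrtimer fires every
 * 12 s and gets 5 chances to tick within one 60 s threshold window.
 */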
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	int this_cpu = raw_smp_processor_id();

	__get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
}
void touch_watchdog(void)
{
	__get_cpu_var(watchdog_touch_ts) = 0;
}
EXPORT_SYMBOL(touch_watchdog);
void touch_all_watchdog(void)
{
	int cpu;

	/*
	 * This is done locklessly; if a 0 races with a fresh timestamp,
	 * all it means is that the softlockup check starts one cycle later.
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}
void touch_nmi_watchdog(void)
{
	touch_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
void touch_all_nmi_watchdog(void)
{
	touch_all_watchdog();
}
void touch_softlockup_watchdog(void)
{
	touch_watchdog();
}
void touch_all_softlockup_watchdogs(void)
{
	touch_all_watchdog();
}
void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}
/* empty stub; the periodic check now runs from watchdog_timer_fn() */
void softlockup_tick(void)
{
}
#ifdef CONFIG_PERF_EVENTS_NMI
/* watchdog detector functions */
static int is_hardlockup(int cpu)
{
	unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);

	if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
		return 1;

	per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
	return 0;
}
#endif
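/*
 * The two detectors cross-check each other: the NMI-driven perf counter
 * samples hrtimer_interrupts, which only the hrtimer callback increments.
 * If the count is unchanged between two consecutive NMI samples, timer
 * interrupts have been blocked for a whole sample period - a hard lockup.
 */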
static int is_softlockup(unsigned long touch_ts, int cpu)
{
	unsigned long now = get_timestamp(cpu);

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + softlockup_thresh))
		return now - touch_ts;

	return 0;
}
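/*
 * Example: with touch_ts == 100, now == 161 and softlockup_thresh == 60,
 * time_after(161, 160) is true and is_softlockup() returns a duration
 * of 61 "seconds" for the report printed by watchdog_timer_fn().
 */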
static int
watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
	did_panic = 1;

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = watchdog_panic,
};
#ifdef CONFIG_PERF_EVENTS_NMI
static struct perf_event_attr wd_hw_attr = {
	.type	= PERF_TYPE_HARDWARE,
	.config	= PERF_COUNT_HW_CPU_CYCLES,
	.size	= sizeof(struct perf_event_attr),
};
/* Callback function for perf event subsystem */
void watchdog_overflow_callback(struct perf_event *event, int nmi,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long touch_ts = per_cpu(watchdog_touch_ts, this_cpu);

	if (touch_ts == 0) {
		__touch_watchdog();
		return;
	}

	/*
	 * Check for a hardlockup by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have fired
	 * multiple times before we overflowed.  If it hasn't then
	 * this is a good indication the cpu is stuck.
	 */
	if (is_hardlockup(this_cpu)) {
		/* only print hardlockups once */
		if (__get_cpu_var(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__get_cpu_var(hard_watchdog_warn) = true;
		return;
	}

	__get_cpu_var(hard_watchdog_warn) = false;
	return;
}
static void watchdog_interrupt_count(void)
{
	__get_cpu_var(hrtimer_interrupts)++;
}
#else
static inline void watchdog_interrupt_count(void) { return; }
#endif /* CONFIG_PERF_EVENTS_NMI */
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	int this_cpu = smp_processor_id();
	unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__get_cpu_var(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
	if (touch_ts == 0) {
		if (unlikely(per_cpu(softlockup_touch_sync, this_cpu))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			per_cpu(softlockup_touch_sync, this_cpu) = false;
			sched_clock_tick();
		}
		__touch_watchdog();
		return HRTIMER_RESTART;
	}
	/*
	 * Check for a softlockup by making sure a high priority task
	 * is being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts, this_cpu);
	if (unlikely(duration)) {
		/* only warn once */
		if (__get_cpu_var(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			this_cpu, duration,
			current->comm, task_pid_nr(current));
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__get_cpu_var(soft_watchdog_warn) = true;
	} else
		__get_cpu_var(soft_watchdog_warn) = false;

	return HRTIMER_RESTART;
}
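/*
 * Summary of the kicker/detector handshake per sample period: the
 * hrtimer bumps hrtimer_interrupts (proving timer IRQs still run, for
 * the hardlockup side) and wakes the per-cpu kthread; the kthread
 * re-touches watchdog_touch_ts (proving tasks can still be scheduled,
 * for the softlockup side).
 */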
/*
 * The watchdog thread - touches the timestamp.
 */
static int watchdog(void *__bind_cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, (unsigned long)__bind_cpu);

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* initialize timestamp */
	__touch_watchdog();

	/* kick off the timer for the hardlockup detector */
	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
		      HRTIMER_MODE_REL_PINNED);

	set_current_state(TASK_INTERRUPTIBLE);
	/*
	 * Run briefly each time the hrtimer wakes us to reset the
	 * softlockup timestamp.  If this gets delayed for more than
	 * softlockup_thresh seconds then the debug-printout in
	 * watchdog_timer_fn() triggers.
	 */
	while (!kthread_should_stop()) {
		__touch_watchdog();
		schedule();

		if (kthread_should_stop())
			break;

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
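/*
 * Note: the kthread does not sleep on a timeout of its own; it is woken
 * by wake_up_process() from watchdog_timer_fn() once per sample period,
 * so a fresh timestamp is written every get_sample_period() ns unless
 * something prevents even this SCHED_FIFO task from running.
 */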
#ifdef CONFIG_PERF_EVENTS_NMI
static int watchdog_nmi_enable(int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	/* Try to register using hardware perf events */
	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period();
	event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback);
	if (!IS_ERR(event)) {
		printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
		goto out_save;
	}

	printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event);
	return -1;

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}
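/*
 * hw_nmi_get_sample_period() is provided by arch code; it is presumably
 * chosen so the cycle counter overflows about once per watchdog
 * threshold window, giving the hrtimer several chances to tick in
 * between (see get_sample_period() above).
 */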
static void watchdog_nmi_disable(int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	return;
}
#else
static int watchdog_nmi_enable(int cpu) { return 0; }
static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_PERF_EVENTS_NMI */
/* prepare/enable/disable routines */
static int watchdog_prepare_cpu(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	WARN_ON(per_cpu(softlockup_watchdog, cpu));
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	return 0;
}
static int watchdog_enable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);

	/* enable the perf event */
	if (watchdog_nmi_enable(cpu) != 0)
		return -1;

	/* create the watchdog thread */
	if (!p) {
		p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
		if (IS_ERR(p)) {
			printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
			return -1;
		}
		kthread_bind(p, cpu);
		per_cpu(watchdog_touch_ts, cpu) = 0;
		per_cpu(softlockup_watchdog, cpu) = p;
		wake_up_process(p);
	}

	/* if any cpu succeeds, watchdog is considered enabled for the system */
	watchdog_enabled = 1;

	return 0;
}
static void watchdog_disable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	/*
	 * cancel the timer first to stop incrementing the stats
	 * and waking up the kthread
	 */
	hrtimer_cancel(hrtimer);

	/* disable the perf event */
	watchdog_nmi_disable(cpu);

	/* stop the watchdog thread */
	if (p) {
		per_cpu(softlockup_watchdog, cpu) = NULL;
		kthread_stop(p);
	}
}
static void watchdog_enable_all_cpus(void)
{
	int cpu;
	int result = 0;

	for_each_online_cpu(cpu)
		result += watchdog_enable(cpu);

	if (result)
		printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
}
static void watchdog_disable_all_cpus(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		watchdog_disable(cpu);

	/* if all watchdogs are disabled, then they are disabled for the system */
	watchdog_enabled = 0;
}
/* sysctl functions */
#ifdef CONFIG_SYSCTL

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog
 */
int proc_dowatchdog_enabled(struct ctl_table *table, int write,
			    void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();
	return 0;
}
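/*
 * Example: "echo 0 > /proc/sys/kernel/nmi_watchdog" lands here with
 * write != 0; proc_dointvec() updates watchdog_enabled, after which the
 * detectors are torn down on every online cpu.
 */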
int proc_dowatchdog_thresh(struct ctl_table *table, int write,
			   void __user *buffer,
			   size_t *lenp, loff_t *ppos)
{
	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
/* stub functions */
int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
			     void __user *buffer,
			     size_t *lenp, loff_t *ppos)
{
	return proc_dowatchdog_thresh(table, write, buffer, lenp, ppos);
}
/* end of stub functions */
#endif /* CONFIG_SYSCTL */
/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (watchdog_prepare_cpu(hotcpu))
			return NOTIFY_BAD;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (watchdog_enable(hotcpu))
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		watchdog_disable(hotcpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		watchdog_disable(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};
static int __init spawn_watchdog_task(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err;

	if (no_watchdog)
		return 0;

	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	WARN_ON(err == NOTIFY_BAD);

	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	return 0;
}
early_initcall(spawn_watchdog_task);