/*
 * Detect hard lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>

static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
static DEFINE_PER_CPU(struct perf_event *, dead_event);
static struct cpumask dead_events_mask;

static unsigned long hardlockup_allcpu_dumped;
static bool hardlockup_detector_disabled;

void arch_touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);

#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
static DEFINE_PER_CPU(ktime_t, last_timestamp);
static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;

void watchdog_update_hrtimer_threshold(u64 period)
{
	/*
	 * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
	 *
	 * So it runs effectively with 2.5 times the rate of the NMI
	 * watchdog. That means the hrtimer should fire 2-3 times before
	 * the NMI watchdog expires. The NMI watchdog on x86 is based on
	 * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
	 * might run way faster than expected and the NMI fires in a
	 * smaller period than the one deduced from the nominal CPU
	 * frequency. Depending on the Turbo-Mode factor this might be fast
	 * enough to get the NMI period smaller than the hrtimer watchdog
	 * period and trigger false positives.
	 *
	 * The sample threshold is used to check in the NMI handler whether
	 * the minimum time between two NMI samples has elapsed. That
	 * prevents false positives.
	 *
	 * Set this to 4/5 of the actual watchdog threshold period so the
	 * hrtimer is guaranteed to fire at least once within the real
	 * watchdog threshold.
	 */
	watchdog_hrtimer_sample_threshold = period * 2;
}

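/*
 * Example: with the default watchdog_thresh of 10 seconds, the hrtimer
 * period passed in here is (10 * 2) / 5 = 4s, so the sample threshold
 * becomes 4s * 2 = 8s, i.e. 4/5 of the 10s NMI watchdog period as
 * described above.
 */
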
static bool watchdog_check_timestamp(void)
{
	ktime_t delta, now = ktime_get_mono_fast_ns();

	delta = now - __this_cpu_read(last_timestamp);
	if (delta < watchdog_hrtimer_sample_threshold) {
		/*
		 * If ktime is jiffies based, a stalled timer would prevent
		 * jiffies from being incremented and the filter would look
		 * at a stale timestamp and never trigger.
		 */
		if (__this_cpu_inc_return(nmi_rearmed) < 10)
			return false;
	}
	__this_cpu_write(nmi_rearmed, 0);
	__this_cpu_write(last_timestamp, now);
	return true;
}
#else
static inline bool watchdog_check_timestamp(void)
{
	return true;
}
#endif

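/*
 * The event template below is pinned to a CPU and created disabled; the
 * event only starts counting once perf_event_enable() is called on it
 * further down in this file.
 */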
static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

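/*
 * The overflow callback below runs in NMI context. That is why it sticks
 * to NMI-safe primitives: raw per-cpu accessors, the lockless
 * ktime_get_mono_fast_ns() based timestamp filter above, and nmi_panic()
 * instead of panic().
 */
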
/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	if (!watchdog_check_timestamp())
		return;

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflow'd.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", this_cpu);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		/*
		 * Perform all-CPU dump only once to avoid multiple
		 * hardlockups generating interleaving traces
		 */
		if (sysctl_hardlockup_all_cpu_backtrace &&
		    !test_and_set_bit(0, &hardlockup_allcpu_dumped))
			trigger_allbutself_cpu_backtrace();

		if (hardlockup_panic)
			nmi_panic(regs, "Hard LOCKUP");

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}

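/*
 * is_hardlockup() lives in kernel/watchdog.c: it compares the per-cpu
 * hrtimer interrupt count against the value saved at the previous NMI
 * sample. A count that has not moved means the timer interrupt referred
 * to in the comment above has stopped incrementing.
 */
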
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long firstcpu_err;
static atomic_t watchdog_cpus;

int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);
	int firstcpu = 0;

	/* nothing to do if the hard lockup detector is disabled */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		goto out;

	/* A failure disabled the hardlockup detector permanently */
	if (hardlockup_detector_disabled)
		return -ENODEV;

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	if (atomic_inc_return(&watchdog_cpus) == 1)
		firstcpu = 1;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save the first cpu's error for future comparison */
	if (firstcpu && IS_ERR(event))
		firstcpu_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for the first cpu initialized */
		if (firstcpu || firstcpu_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (!firstcpu && (PTR_ERR(event) == firstcpu_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warn("disabled (cpu%i): hardware events not enabled\n",
			cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
		       cpu, PTR_ERR(event));

	pr_info("Disabling hard lockup detector permanently\n");
	hardlockup_detector_disabled = true;

	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

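/*
 * hw_nmi_get_sample_period() translates watchdog_thresh (in seconds)
 * into a cycle count for the event; on x86 it is essentially
 * cpu_khz * 1000 * watchdog_thresh, so the counter overflows and the
 * NMI fires roughly once per watchdog_thresh seconds of unhalted cycles.
 */
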
static int hardlockup_detector_event_create(void)
{
	unsigned int cpu = smp_processor_id();
	struct perf_event_attr *wd_attr;
	struct perf_event *evt;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
					       watchdog_overflow_callback, NULL);
	if (IS_ERR(evt)) {
		pr_info("Perf event create on CPU %d failed with %ld\n", cpu,
			PTR_ERR(evt));
		return PTR_ERR(evt);
	}
	this_cpu_write(watchdog_ev, evt);
	return 0;
}

/**
 * hardlockup_detector_perf_enable - Enable the local event
 */
void hardlockup_detector_perf_enable(void)
{
	if (hardlockup_detector_event_create())
		return;

	perf_event_enable(this_cpu_read(watchdog_ev));
}

/**
 * hardlockup_detector_perf_disable - Disable the local event
 */
void hardlockup_detector_perf_disable(void)
{
	struct perf_event *event = this_cpu_read(watchdog_ev);

	if (event) {
		perf_event_disable(event);
		this_cpu_write(watchdog_ev, NULL);
		this_cpu_write(dead_event, event);
		cpumask_set_cpu(smp_processor_id(), &dead_events_mask);

		/* watchdog_nmi_enable() expects this to be zero initially. */
		if (atomic_dec_and_test(&watchdog_cpus))
			firstcpu_err = 0;
	}
}

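/*
 * The event is deliberately not released above: this runs in the CPU
 * hotplug path, where calling perf_event_release_kernel() can deadlock.
 * The event is instead parked via dead_event and dead_events_mask and
 * destroyed later by hardlockup_detector_perf_cleanup().
 */
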
/**
 * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
 *
 * Called from lockup_detector_cleanup(). Serialized by the caller.
 */
void hardlockup_detector_perf_cleanup(void)
{
	int cpu;

	for_each_cpu(cpu, &dead_events_mask) {
		struct perf_event *event = per_cpu(dead_event, cpu);

		per_cpu(dead_event, cpu) = NULL;
		/*
		 * The event can be NULL on UP kernels, where for_each_cpu()
		 * unconditionally reports CPU0 as set in the mask.
		 */
		if (event)
			perf_event_release_kernel(event);
	}
	cpumask_clear(&dead_events_mask);
}

/**
 * hardlockup_detector_perf_stop - Globally stop watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_stop(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_disable(event);
	}
}

/**
 * hardlockup_detector_perf_restart - Globally restart watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_restart(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		return;

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_enable(event);
	}
}

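/*
 * Note: the probe below releases its event right away on success;
 * hardlockup_detector_perf_init() only verifies that a hardware perf
 * event can be created at all. The events used at runtime are created
 * per CPU via hardlockup_detector_perf_enable().
 */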
/**
 * hardlockup_detector_perf_init - Probe whether NMI event is available at all
 */
int __init hardlockup_detector_perf_init(void)
{
	int ret = hardlockup_detector_event_create();

	if (ret) {
		pr_info("Perf NMI watchdog permanently disabled\n");
	} else {
		perf_event_release_kernel(this_cpu_read(watchdog_ev));
		this_cpu_write(watchdog_ev, NULL);
	}
	return ret;
}