/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *  Copyright (C) 2011  Don Zickus Red Hat, Inc.
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/sched/clock.h>
#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif
#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/cache.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
struct nmi_desc {
	spinlock_t lock;
	struct list_head head;
};
static struct nmi_desc nmi_desc[NMI_MAX] =
{
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
		.head = LIST_HEAD_INIT(nmi_desc[2].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
		.head = LIST_HEAD_INIT(nmi_desc[3].head),
	},
};
struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
static int ignore_nmis __read_mostly;

int unknown_nmi_panic;
/*
 * Prevent NMI reason port (0x61) being accessed simultaneously, can
 * only be used in NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
#define nmi_to_desc(type) (&nmi_desc[type])

static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;
static int __init nmi_warning_debugfs(void)
{
	debugfs_create_u64("nmi_longest_ns", 0644,
			arch_debugfs_dir, &nmi_longest_ns);
	return 0;
}
fs_initcall(nmi_warning_debugfs);
static void nmi_max_handler(struct irq_work *w)
{
	struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
	int remainder_ns, decimal_msecs;
	u64 whole_msecs = ACCESS_ONCE(a->max_duration);

	remainder_ns = do_div(whole_msecs, (1000 * 1000));
	decimal_msecs = remainder_ns / 1000;

	printk_ratelimited(KERN_INFO
		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
		a->handler, whole_msecs, decimal_msecs);
}
static int nmi_handle(unsigned int type, struct pt_regs *regs)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled = 0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, which means if you have enough
	 * of them concurrently, you can lose some because only one
	 * can be latched at any given time.  Walk the whole list
	 * to handle those situations.
	 */
	list_for_each_entry_rcu(a, &desc->head, list) {
		int thishandled;
		u64 delta;

		delta = sched_clock();
		thishandled = a->handler(type, regs);
		handled += thishandled;
		delta = sched_clock() - delta;
		trace_nmi_handler(a->handler, (int)delta, thishandled);

		if (delta < nmi_longest_ns || delta < a->max_duration)
			continue;

		a->max_duration = delta;
		irq_work_queue(&a->irq_work);
	}

	rcu_read_unlock();

	/* return total number of NMI events handled */
	return handled;
}
NOKPROBE_SYMBOL(nmi_handle);
int __register_nmi_handler(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	if (!action->handler)
		return -EINVAL;

	init_irq_work(&action->irq_work, nmi_max_handler);

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * most handlers of type NMI_UNKNOWN never return because
	 * they just assume the NMI is theirs.  Just a sanity check
	 * to manage expectations
	 */
	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));

	/*
	 * some handlers need to be executed first otherwise a fake
	 * event confuses some handlers (kdump uses this flag)
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
EXPORT_SYMBOL(__register_nmi_handler);
void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * the name passed in to describe the nmi handler
		 * is used as the lookup key
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
				"Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			break;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_SERR, regs))
		return;

	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	/*
	 * On some machines, PCI SERR line is used to report memory
	 * errors. EDAC makes use of it.
	 */
#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		nmi_panic(regs, "NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(pci_serr_error);
static void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_IO_CHECK, regs))
		return;

	pr_emerg("NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_regs(regs);

	if (panic_on_io_nmi) {
		nmi_panic(regs, "NMI IOCK error: Not continuing");

		/*
		 * If we end up here, it means we have received an NMI while
		 * processing panic(). Simply return without delaying and
		 * re-enabling NMIs.
		 */
		return;
	}

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(io_check_error);
static void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	/*
	 * Use 'false' as back-to-back NMIs are dealt with one level up.
	 * Of course this makes having multiple 'unknown' handlers useless
	 * as only the first one is ever run (unless it can actually determine
	 * if it caused the NMI)
	 */
	handled = nmi_handle(NMI_UNKNOWN, regs);
	if (handled) {
		__this_cpu_add(nmi_stats.unknown, handled);
		return;
	}

	__this_cpu_add(nmi_stats.unknown, 1);

	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		nmi_panic(regs, "NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}
NOKPROBE_SYMBOL(unknown_nmi_error);
static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
static void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;
	bool b2b = false;

	/*
	 * CPU-specific NMI must be processed before non-CPU-specific
	 * NMI, otherwise we may lose it, because the CPU-specific
	 * NMI can not be detected/processed on other CPUs.
	 */

	/*
	 * Back-to-back NMIs are interesting because they can either
	 * be two NMI or more than two NMIs (any thing over two is dropped
	 * due to NMI being edge-triggered).  If this is the second half
	 * of the back-to-back NMI, assume we dropped things and process
	 * more handlers.  Otherwise reset the 'swallow' NMI behaviour
	 */
	if (regs->ip == __this_cpu_read(last_nmi_rip))
		b2b = true;
	else
		__this_cpu_write(swallow_nmi, false);

	__this_cpu_write(last_nmi_rip, regs->ip);

	handled = nmi_handle(NMI_LOCAL, regs);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
		/*
		 * There are cases when a NMI handler handles multiple
		 * events in the current NMI.  One of these events may
		 * be queued for in the next NMI.  Because the event is
		 * already handled, the next NMI will result in an unknown
		 * NMI.  Instead lets flag this for a potential NMI to
		 * swallow.
		 */
		if (handled > 1)
			__this_cpu_write(swallow_nmi, true);
		return;
	}

	/*
	 * Non-CPU-specific NMI: NMI sources can be processed on any CPU.
	 *
	 * Another CPU may be processing panic routines while holding
	 * nmi_reason_lock. Check if the CPU issued the IPI for crash dumping,
	 * and if so, call its callback directly.  If there is no CPU preparing
	 * crash dump, we simply loop here.
	 */
	while (!raw_spin_trylock(&nmi_reason_lock)) {
		run_crash_ipi_callback(regs);
		cpu_relax();
	}

	reason = x86_platform.get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		__this_cpu_add(nmi_stats.external, 1);
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	/*
	 * Only one NMI can be latched at a time.  To handle
	 * this we may process multiple nmi handlers at once to
	 * cover the case where an NMI is dropped.  The downside
	 * to this approach is we may process an NMI prematurely,
	 * while its real NMI is sitting latched.  This will cause
	 * an unknown NMI on the next run of the NMI processing.
	 *
	 * We tried to flag that condition above, by setting the
	 * swallow_nmi flag when we process more than one event.
	 * This condition is also only present on the second half
	 * of a back-to-back NMI, so we flag that condition too.
	 *
	 * If both are true, we assume we already processed this
	 * NMI previously and we swallow it.  Otherwise we reset
	 * the logic.
	 *
	 * There are scenarios where we may accidentally swallow
	 * a 'real' unknown NMI.  For example, while processing
	 * a perf NMI another perf NMI comes in along with a
	 * 'real' unknown NMI.  These two NMIs get combined into
	 * one (as described above).  When the next NMI gets
	 * processed, it will be flagged by perf as handled, but
	 * no one will know that there was a 'real' unknown NMI sent
	 * also.  As a result it gets swallowed.  Or if the first
	 * perf NMI returns two events handled then the second
	 * NMI will get eaten by the logic below, again losing a
	 * 'real' unknown NMI.  But this is the best we can do
	 * for now.
	 */
	if (b2b && __this_cpu_read(swallow_nmi))
		__this_cpu_add(nmi_stats.swallow, 1);
	else
		unknown_nmi_error(reason, regs);
}
NOKPROBE_SYMBOL(default_do_nmi);
/*
 * NMIs can page fault or hit breakpoints which will cause it to lose
 * its NMI context with the CPU when the breakpoint or page fault does an IRET.
 *
 * As a result, NMIs can nest if NMIs get unmasked due an IRET during
 * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
 * if the outer NMI came from kernel mode, but we can still nest if the
 * outer NMI came from user mode.
 *
 * To handle these nested NMIs, we have three states:
 *
 *  1) not running
 *  2) executing
 *  3) latched
 *
 * When no NMI is in progress, it is in the "not running" state.
 * When an NMI comes in, it goes into the "executing" state.
 * Normally, if another NMI is triggered, it does not interrupt
 * the running NMI and the HW will simply latch it so that when
 * the first NMI finishes, it will restart the second NMI.
 * (Note, the latch is binary, thus multiple NMIs triggering,
 *  when one is running, are ignored. Only one NMI is restarted.)
 *
 * If an NMI executes an iret, another NMI can preempt it. We do not
 * want to allow this new NMI to run, but we want to execute it when the
 * first one finishes.  We set the state to "latched", and the exit of
 * the first NMI will perform a dec_return, if the result is zero
 * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
 * dec_return would have set the state to NMI_EXECUTING (what we want it
 * to be when we are running). In this case, we simply jump back to
 * rerun the NMI handler again, and restart the 'latched' NMI.
 *
 * No trap (breakpoint or page fault) should be hit before nmi_restart,
 * thus there is no race between the first check of state for NOT_RUNNING
 * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs
 * at this point.
 *
 * In case the NMI takes a page fault, we need to save off the CR2
 * because the NMI could have preempted another page fault and corrupt
 * the CR2 that is about to be read. As nested NMIs must be restarted
 * and they can not take breakpoints or page faults, the update of the
 * CR2 must be done before converting the nmi state back to NOT_RUNNING.
 * Otherwise, there would be a race of another nested NMI coming in
 * after setting state to NOT_RUNNING but before updating the nmi_cr2.
 */
enum nmi_states {
	NMI_NOT_RUNNING = 0,
	NMI_EXECUTING,
	NMI_LATCHED,
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);
#ifdef CONFIG_X86_64
/*
 * In x86_64, we need to handle breakpoint -> NMI -> breakpoint.  Without
 * some care, the inner breakpoint will clobber the outer breakpoint's
 * stack.
 *
 * If a breakpoint is being processed, and the debug stack is being
 * used, if an NMI comes in and also hits a breakpoint, the stack
 * pointer will be set to the same fixed address as the breakpoint that
 * was interrupted, causing that stack to be corrupted. To handle this
 * case, check if the stack that was interrupted is the debug stack, and
 * if so, change the IDT so that new breakpoints will use the current
 * stack and not switch to the fixed address. On return of the NMI,
 * switch back to the original IDT.
 */
static DEFINE_PER_CPU(int, update_debug_stack);
#endif
dotraplinkage notrace void
do_nmi(struct pt_regs *regs, long error_code)
{
	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
		this_cpu_write(nmi_state, NMI_LATCHED);
		return;
	}
	this_cpu_write(nmi_state, NMI_EXECUTING);
	this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:

#ifdef CONFIG_X86_64
	/*
	 * If we interrupted a breakpoint, it is possible that
	 * the nmi handler will have breakpoints too. We need to
	 * change the IDT such that breakpoints that happen here
	 * continue to use the NMI stack.
	 */
	if (unlikely(is_debug_stack(regs->sp))) {
		debug_stack_set_zero();
		this_cpu_write(update_debug_stack, 1);
	}
#endif

	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();

#ifdef CONFIG_X86_64
	if (unlikely(this_cpu_read(update_debug_stack))) {
		debug_stack_reset();
		this_cpu_write(update_debug_stack, 0);
	}
#endif

	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
		write_cr2(this_cpu_read(nmi_cr2));
	if (this_cpu_dec_return(nmi_state))
		goto nmi_restart;
}
NOKPROBE_SYMBOL(do_nmi);
void restart_nmi(void)
{
	ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}
EXPORT_SYMBOL_GPL(local_touch_nmi);