/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *  Copyright (C) 2011	Don Zickus Red Hat, Inc.
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/export.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>

struct nmi_desc {
	spinlock_t lock;
	struct list_head head;
};

static struct nmi_desc nmi_desc[NMI_MAX] =
{
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
		.head = LIST_HEAD_INIT(nmi_desc[2].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
		.head = LIST_HEAD_INIT(nmi_desc[3].head),
	},
};

struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);

static int ignore_nmis;

int unknown_nmi_panic;
/*
 * Prevent NMI reason port (0x61) being accessed simultaneously, can
 * only be used in NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);

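/*
 * Usage note (not part of the original file): because of the __setup()
 * hook above, booting with the bare token "unknown_nmi_panic" on the
 * kernel command line sets the flag, so an NMI that no handler claims
 * panics the machine instead of only being logged.
 */
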
#define nmi_to_desc(type) (&nmi_desc[type])

static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;
static int __init nmi_warning_debugfs(void)
{
	debugfs_create_u64("nmi_longest_ns", 0644,
			arch_debugfs_dir, &nmi_longest_ns);
	return 0;
}
fs_initcall(nmi_warning_debugfs);

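/*
 * Usage note (not from the original file): the threshold above can be
 * tuned at run time through the debugfs file created here. Assuming
 * debugfs is mounted at /sys/kernel/debug (arch_debugfs_dir is the "x86"
 * subdirectory), something like
 *
 *	echo 2000000 > /sys/kernel/debug/x86/nmi_longest_ns
 *
 * raises the "NMI handler took too long" warning threshold to 2 ms.
 */
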
static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled = 0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, which means if you have enough
	 * of them concurrently, you can lose some because only one
	 * can be latched at any given time.  Walk the whole list
	 * to handle those situations.
	 */
	list_for_each_entry_rcu(a, &desc->head, list) {
		u64 before, delta, whole_msecs;
		int remainder_ns, decimal_msecs;

		before = local_clock();
		handled += a->handler(type, regs);
		delta = local_clock() - before;

		if (delta < nmi_longest_ns)
			continue;

		nmi_longest_ns = delta;
		/* do_div() divides in place and returns the remainder */
		whole_msecs = delta;
		remainder_ns = do_div(whole_msecs, (1000 * 1000));
		decimal_msecs = remainder_ns / 1000;
		printk_ratelimited(KERN_INFO
			"INFO: NMI handler (%ps) took too long to run: "
			"%lld.%03d msecs\n", a->handler, whole_msecs,
			decimal_msecs);
	}

	rcu_read_unlock();

	/* return total number of NMI events handled */
	return handled;
}

int __register_nmi_handler(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	if (!action->handler)
		return -EINVAL;

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * most handlers of type NMI_UNKNOWN never return because
	 * they just assume the NMI is theirs.  Just a sanity check
	 * to manage expectations
	 */
	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));

	/*
	 * some handlers need to be executed first otherwise a fake
	 * event confuses some handlers (kdump uses this flag)
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
EXPORT_SYMBOL(__register_nmi_handler);

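/*
 * Usage sketch (not from this file): callers normally go through the
 * register_nmi_handler() wrapper in <asm/nmi.h>, which builds the
 * struct nmiaction and hands it to __register_nmi_handler() above.
 * A handler returns how many NMI events it claimed, e.g.:
 *
 *	static int my_nmi(unsigned int type, struct pt_regs *regs)
 *	{
 *		if (!my_device_raised_nmi())
 *			return 0;	(not ours, let other handlers run)
 *		my_device_ack_nmi();
 *		return 1;		(one event handled)
 *	}
 *
 *	register_nmi_handler(NMI_LOCAL, my_nmi, 0, "my_nmi");
 *	...
 *	unregister_nmi_handler(NMI_LOCAL, "my_nmi");
 *
 * my_device_raised_nmi()/my_device_ack_nmi() are hypothetical driver
 * helpers, used only for illustration.
 */
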
void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * the name passed in to describe the nmi handler
		 * is used as the lookup key
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
				"Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			break;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);

static __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_SERR, regs, false))
		return;

	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	/*
	 * On some machines, PCI SERR line is used to report memory
	 * errors. EDAC makes use of it.
	 */
#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}

static __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_IO_CHECK, regs, false))
		return;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_regs(regs);

	if (panic_on_io_nmi)
		panic("NMI IOCK error: Not continuing");

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}

static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	/*
	 * Use 'false' as back-to-back NMIs are dealt with one level up.
	 * Of course this makes having multiple 'unknown' handlers useless
	 * as only the first one is ever run (unless it can actually determine
	 * if it caused the NMI)
	 */
	handled = nmi_handle(NMI_UNKNOWN, regs, false);
	if (handled) {
		__this_cpu_add(nmi_stats.unknown, handled);
		return;
	}

	__this_cpu_add(nmi_stats.unknown, 1);

	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}

static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);

static __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;
	bool b2b = false;

	/*
	 * CPU-specific NMI must be processed before non-CPU-specific
	 * NMI, otherwise we may lose it, because the CPU-specific
	 * NMI can not be detected/processed on other CPUs.
	 */

	/*
	 * Back-to-back NMIs are interesting because they can either
	 * be two NMIs or more than two NMIs (anything over two is dropped
	 * due to NMI being edge-triggered).  If this is the second half
	 * of the back-to-back NMI, assume we dropped things and process
	 * more handlers.  Otherwise reset the 'swallow' NMI behaviour
	 */
	if (regs->ip == __this_cpu_read(last_nmi_rip))
		b2b = true;
	else
		__this_cpu_write(swallow_nmi, false);

	__this_cpu_write(last_nmi_rip, regs->ip);

	handled = nmi_handle(NMI_LOCAL, regs, b2b);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
		/*
		 * There are cases when a NMI handler handles multiple
		 * events in the current NMI.  One of these events may
		 * be queued for in the next NMI.  Because the event is
		 * already handled, the next NMI will result in an unknown
		 * NMI.  Instead lets flag this for a potential NMI to
		 * swallow.
		 */
		if (handled > 1)
			__this_cpu_write(swallow_nmi, true);
		return;
	}

	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
	raw_spin_lock(&nmi_reason_lock);
	reason = x86_platform.get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		__this_cpu_add(nmi_stats.external, 1);
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	/*
	 * Only one NMI can be latched at a time.  To handle
	 * this we may process multiple nmi handlers at once to
	 * cover the case where an NMI is dropped.  The downside
	 * to this approach is we may process an NMI prematurely,
	 * while its real NMI is sitting latched.  This will cause
	 * an unknown NMI on the next run of the NMI processing.
	 *
	 * We tried to flag that condition above, by setting the
	 * swallow_nmi flag when we process more than one event.
	 * This condition is also only present on the second half
	 * of a back-to-back NMI, so we flag that condition too.
	 *
	 * If both are true, we assume we already processed this
	 * NMI previously and we swallow it.  Otherwise we reset
	 * the logic.
	 *
	 * There are scenarios where we may accidentally swallow
	 * a 'real' unknown NMI.  For example, while processing
	 * a perf NMI another perf NMI comes in along with a
	 * 'real' unknown NMI.  These two NMIs get combined into
	 * one (as described above).  When the next NMI gets
	 * processed, it will be flagged by perf as handled, but
	 * no one will know that there was a 'real' unknown NMI sent
	 * also.  As a result it gets swallowed.  Or if the first
	 * perf NMI returns two events handled then the second
	 * NMI will get eaten by the logic below, again losing a
	 * 'real' unknown NMI.  But this is the best we can do
	 * for now.
	 */
	if (b2b && __this_cpu_read(swallow_nmi))
		__this_cpu_add(nmi_stats.swallow, 1);
	else
		unknown_nmi_error(reason, regs);
}

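/*
 * Worked example of the swallow logic above (illustrative only): perf
 * raises two NMIs almost together; the first is taken, the second is
 * latched by the hardware. The first pass finds and handles both perf
 * events (handled == 2), so swallow_nmi is set. The latched NMI then
 * fires from the same instruction pointer, so b2b is true; no handler
 * claims it, and instead of reporting an unknown NMI it is only counted
 * in nmi_stats.swallow.
 */
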
/*
 * NMIs can hit breakpoints which will cause it to lose its
 * NMI context with the CPU when the breakpoint does an iret.
 */
#ifdef CONFIG_X86_32
/*
 * For i386, NMIs use the same stack as the kernel, and we can
 * add a workaround to the iret problem in C (preventing nested
 * NMIs if an NMI takes a trap). Simply have 3 states the NMI
 * can be in:
 *
 *  1) not running
 *  2) executing
 *  3) latched
 *
 * When no NMI is in progress, it is in the "not running" state.
 * When an NMI comes in, it goes into the "executing" state.
 * Normally, if another NMI is triggered, it does not interrupt
 * the running NMI and the HW will simply latch it so that when
 * the first NMI finishes, it will restart the second NMI.
 * (Note, the latch is binary, thus multiple NMIs triggering,
 *  when one is running, are ignored. Only one NMI is restarted.)
 *
 * If an NMI hits a breakpoint that executes an iret, another
 * NMI can preempt it. We do not want to allow this new NMI
 * to run, but we want to execute it when the first one finishes.
 * We set the state to "latched", and the exit of the first NMI will
 * perform a dec_return, if the result is zero (NOT_RUNNING), then
 * it will simply exit the NMI handler. If not, the dec_return
 * would have set the state to NMI_EXECUTING (what we want it to
 * be when we are running). In this case, we simply jump back
 * to rerun the NMI handler again, and restart the 'latched' NMI.
 *
 * No trap (breakpoint or page fault) should be hit before nmi_restart,
 * thus there is no race between the first check of state for NOT_RUNNING
 * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs
 * at this point.
 *
 * In case the NMI takes a page fault, we need to save off the CR2
 * because the NMI could have preempted another page fault and corrupt
 * the CR2 that is about to be read. As nested NMIs must be restarted
 * and they can not take breakpoints or page faults, the update of the
 * CR2 must be done before converting the nmi state back to NOT_RUNNING.
 * Otherwise, there would be a race of another nested NMI coming in
 * after setting state to NOT_RUNNING but before updating the nmi_cr2.
 */
enum nmi_states {
	NMI_NOT_RUNNING = 0,
	NMI_EXECUTING,
	NMI_LATCHED,
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);

#define nmi_nesting_preprocess(regs)					\
	do {								\
		if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {	\
			this_cpu_write(nmi_state, NMI_LATCHED);		\
			return;						\
		}							\
		this_cpu_write(nmi_state, NMI_EXECUTING);		\
		this_cpu_write(nmi_cr2, read_cr2());			\
	} while (0);							\
	nmi_restart:

#define nmi_nesting_postprocess()					\
	do {								\
		if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))	\
			write_cr2(this_cpu_read(nmi_cr2));		\
		if (this_cpu_dec_return(nmi_state))			\
			goto nmi_restart;				\
	} while (0)

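/*
 * Rough sketch (simplified names, not actual code) of what do_nmi() below
 * turns into on i386 once the two macros above are expanded:
 *
 *	if (state != NMI_NOT_RUNNING) { state = NMI_LATCHED; return; }
 *	state = NMI_EXECUTING;
 *	saved_cr2 = read_cr2();
 * nmi_restart:
 *	... run the NMI handlers ...
 *	if (saved_cr2 != read_cr2())
 *		write_cr2(saved_cr2);
 *	if (--state)			(was LATCHED, now EXECUTING again)
 *		goto nmi_restart;	(rerun for the nested NMI)
 */
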
#else /* x86_64 */
/*
 * In x86_64 things are a bit more difficult. This has the same problem
 * where an NMI hitting a breakpoint that calls iret will remove the
 * NMI context, allowing a nested NMI to enter. What makes this more
 * difficult is that both NMIs and breakpoints have their own stack.
 * When a new NMI or breakpoint is executed, the stack is set to a fixed
 * point. If an NMI is nested, it will have its stack set at that same
 * fixed address that the first NMI had, and will start corrupting the
 * stack. This is handled in entry_64.S, but the same problem exists with
 * the breakpoint stack.
 *
 * If a breakpoint is being processed, and the debug stack is being used,
 * if an NMI comes in and also hits a breakpoint, the stack pointer
 * will be set to the same fixed address as the breakpoint that was
 * interrupted, causing that stack to be corrupted. To handle this case,
 * check if the stack that was interrupted is the debug stack, and if
 * so, change the IDT so that new breakpoints will use the current stack
 * and not switch to the fixed address. On return of the NMI, switch back
 * to the original IDT.
 */
static DEFINE_PER_CPU(int, update_debug_stack);

static inline void nmi_nesting_preprocess(struct pt_regs *regs)
{
	/*
	 * If we interrupted a breakpoint, it is possible that
	 * the nmi handler will have breakpoints too. We need to
	 * change the IDT such that breakpoints that happen here
	 * continue to use the NMI stack.
	 */
	if (unlikely(is_debug_stack(regs->sp))) {
		debug_stack_set_zero();
		this_cpu_write(update_debug_stack, 1);
	}
}

static inline void nmi_nesting_postprocess(void)
{
	if (unlikely(this_cpu_read(update_debug_stack))) {
		debug_stack_reset();
		this_cpu_write(update_debug_stack, 0);
	}
}
#endif

dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_nesting_preprocess(regs);

	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();

	/* On i386, may loop back to preprocess */
	nmi_nesting_postprocess();
}

void restart_nmi(void)
{
	ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}
EXPORT_SYMBOL_GPL(local_touch_nmi);