/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/trace/mpx.h>
#include <asm/mpx.h>
#include <asm/vm86.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif

/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[NR_VECTORS] __page_aligned_bss;

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static inline void cond_local_irq_enable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void cond_local_irq_disable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

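/*
 * Note that both helpers key off the interrupted context's saved IF flag:
 * a trap handler re-enables interrupts only if the code it interrupted
 * was running with interrupts enabled, so it never turns IRQs on inside
 * a region that deliberately had them off.
 */
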
/*
 * In IST context, we explicitly disable preemption.  This serves two
 * purposes: it makes it much less likely that we would accidentally
 * schedule in IST context and it will force a warning if we somehow
 * manage to schedule by accident.
 */
void ist_enter(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	} else {
		/*
		 * We might have interrupted pretty much anything.  In
		 * fact, if we're a machine check, we can even interrupt
		 * NMI processing.  We don't want in_nmi() to return true,
		 * but we need to notify RCU.
		 */
		rcu_nmi_enter();
	}

	preempt_disable();

	/* This code is a bit fragile.  Test it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
}

void ist_exit(struct pt_regs *regs)
{
	preempt_enable_no_resched();

	if (!user_mode(regs))
		rcu_nmi_exit();
}

/**
 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 * @regs:	regs passed to the IST exception handler
 *
 * IST exception handlers normally cannot schedule.  As a special
 * exception, if the exception interrupted userspace code (i.e.
 * user_mode(regs) would return true) and the exception was not
 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 * Callers are responsible for enabling interrupts themselves inside
 * the non-atomic section, and callers must call ist_end_non_atomic()
 * before ist_exit().
 */
void ist_begin_non_atomic(struct pt_regs *regs)
{
	BUG_ON(!user_mode(regs));

	/*
	 * Sanity check: we need to be on the normal thread stack.  This
	 * will catch asm bugs and any attempt to use ist_preempt_enable
	 * from double_fault.
	 */
	BUG_ON((unsigned long)(current_top_of_stack() -
			       current_stack_pointer()) >= THREAD_SIZE);

	preempt_enable_no_resched();
}

/**
 * ist_end_non_atomic() - end a non-atomic section in an IST exception
 *
 * Ends a non-atomic section started with ist_begin_non_atomic().
 */
void ist_end_non_atomic(void)
{
	preempt_disable();
}

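/*
 * A sketch of the intended nesting, modelled on how an IST handler such
 * as the machine check handler can sleep after trapping from user mode
 * (illustrative only):
 *
 *	ist_enter(regs);
 *	...atomic work...
 *	if (user_mode(regs)) {
 *		ist_begin_non_atomic(regs);
 *		local_irq_enable();
 *		...work that may sleep...
 *		local_irq_disable();
 *		ist_end_non_atomic();
 *	}
 *	ist_exit(regs);
 */
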
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
		return -1;
	}

	if (!user_mode(regs)) {
		if (!fixup_exception(regs, trapnr)) {
			tsk->thread.error_code = error_code;
			tsk->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return 0;
	}

	return -1;
}

static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
				 siginfo_t *info)
{
	unsigned long siaddr;
	int sicode;

	switch (trapnr) {
	default:
		return SEND_SIG_PRIV;

	case X86_TRAP_DE:
		sicode = FPE_INTDIV;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_UD:
		sicode = ILL_ILLOPN;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_AC:
		sicode = BUS_ADRALN;
		siaddr = 0;
		break;
	}

	info->si_signo = signr;
	info->si_errno = 0;
	info->si_code = sicode;
	info->si_addr = (void __user *)siaddr;
	return info;
}

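/*
 * SEND_SIG_PRIV in the default case above is the kernel's "no detailed
 * siginfo" marker: the signal is delivered as a kernel-generated signal
 * rather than with a fabricated fault address.
 */
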
static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;

	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr)
{
	siginfo_t info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code,
			fill_trap_info(regs, signr, trapnr, &info));
	}
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	do_error_trap(regs, error_code, str, trapnr, signr);		\
}

DO_ERROR(X86_TRAP_DE,     SIGFPE,  "divide error",		divide_error)
DO_ERROR(X86_TRAP_OF,     SIGSEGV, "overflow",			overflow)
DO_ERROR(X86_TRAP_UD,     SIGILL,  "invalid opcode",		invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",		invalid_TSS)
DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",	segment_not_present)
DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",		stack_segment)
DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",		alignment_check)

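/*
 * For reference, the first DO_ERROR() entry above expands to:
 *
 *	dotraplinkage void do_divide_error(struct pt_regs *regs, long error_code)
 *	{
 *		do_error_trap(regs, error_code, "divide error", X86_TRAP_DE, SIGFPE);
 *	}
 */
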
#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(const char *message,
						struct pt_regs *regs,
						unsigned long fault_address)
{
	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
		 (void *)fault_address, current->stack,
		 (char *)current->stack + THREAD_SIZE - 1);
	die(message, regs, 0);

	/* Be absolutely certain we don't return. */
	panic(message);
}
#endif

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;
#ifdef CONFIG_VMAP_STACK
	unsigned long cr2;
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, modify
	 * the stack to make it look like we just entered the #GP
	 * handler from user space, similar to bad_iret.
	 *
	 * No need for ist_enter here because we don't use RCU.
	 */
	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
	    regs->cs == __KERNEL_CS &&
	    regs->ip == (unsigned long)native_irq_return_iret) {
		struct pt_regs *normal_regs = task_pt_regs(current);

		/* Fake a #GP(0) from userspace. */
		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
		regs->ip = (unsigned long)general_protection;
		regs->sp = (unsigned long)&normal_regs->orig_ax;

		return;
	}
#endif

	ist_enter(regs);
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU will fail
	 * to deliver #PF and will send #DF instead.  Similarly, if we
	 * take any non-IST exception while too close to the bottom of
	 * the stack, the processor will get a page fault while
	 * delivering the exception and will generate a double fault.
	 *
	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
	 * Page-Fault Exception (#PF)"):
	 *
	 *   Processors update CR2 whenever a page fault is detected. If a
	 *   second page fault occurs while an earlier page fault is being
	 *   delivered, the faulting linear address of the second fault will
	 *   overwrite the contents of CR2 (replacing the previous
	 *   address). These updates to CR2 occur even if the page fault
	 *   results in a double fault or occurs during the delivery of a
	 *   double fault.
	 *
	 * The logic below has a small possibility of incorrectly diagnosing
	 * some errors as stack overflows.  For example, if the IDT or GDT
	 * gets corrupted such that #GP delivery fails due to a bad descriptor
	 * causing #GP and we hit this condition while CR2 coincidentally
	 * points to the stack guard page, we'll think we overflowed the
	 * stack.  Given that we're going to panic one way or another
	 * if this happens, this isn't necessarily worth fixing.
	 *
	 * If necessary, we could improve the test by only diagnosing
	 * a stack overflow if the saved RSP points within 47 bytes of
	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
	 * take an exception, the stack is already aligned and there
	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
	 * possible error code, so a stack overflow would *not* double
	 * fault.  With any less space left, exception delivery could
	 * fail, and, as a practical matter, we've overflowed the
	 * stack even if the actual trigger for the double fault was
	 * something else.
	 */
	cr2 = read_cr2();
	if ((unsigned long)task_stack_page(tsk) - 1 - cr2 < PAGE_SIZE)
		handle_stack_overflow("kernel stack overflow (double-fault)", regs, cr2);
#endif

#ifdef CONFIG_DOUBLEFAULT
	df_debug(regs, error_code);
#endif
	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

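/*
 * The guard-page test above also relies on unsigned arithmetic: it fires
 * only when cr2 falls within the PAGE_SIZE region immediately below the
 * task's stack, i.e. the guard page that a kernel stack overflow hits.
 */
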
dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
	const struct mpx_bndcsr *bndcsr;
	siginfo_t *info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, error_code);

	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
		/* The exception is not from Intel MPX */
		goto exit_trap;
	}

	/*
	 * We need to look at BNDSTATUS to resolve this exception.
	 * A NULL here might mean that it is in its 'init state',
	 * which is all zeros which indicates MPX was not
	 * responsible for the exception.
	 */
	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		goto exit_trap;

	trace_bounds_exception_mpx(bndcsr);
	/*
	 * The error code field of the BNDSTATUS register communicates status
	 * information of a bound range exception #BR or operation involving
	 * bound directory.
	 */
	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
	case 2:	/* Bound directory has invalid entry. */
		if (mpx_handle_bd_fault())
			goto exit_trap;
		break; /* Success, it was handled */
	case 1: /* Bound violation. */
		info = mpx_generate_siginfo(regs);
		if (IS_ERR(info)) {
			/*
			 * We failed to decode the MPX instruction.  Act as if
			 * the exception was not caused by MPX.
			 */
			goto exit_trap;
		}
		/*
		 * Success, we decoded the instruction and retrieved
		 * an 'info' containing the address being accessed
		 * which caused the exception.  This information
		 * allows an application to possibly handle the
		 * #BR exception itself.
		 */
		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
		kfree(info);
		break;
	case 0: /* No exception caused by Intel MPX operations. */
		goto exit_trap;
	default:
		die("bounds", regs, error_code);
	}

	return;

exit_trap:
	/*
	 * This path out is for all the cases where we could not
	 * handle the exception in some way (like allocating a
	 * table or telling userspace about it).  We will also end
	 * up here if the kernel has MPX turned off at compile
	 * time.
	 */
	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
}

dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	cond_local_irq_enable(regs);

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		return;
	}

	tsk = current;
	if (!user_mode(regs)) {
		if (fixup_exception(regs, X86_TRAP_GP))
			return;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die("general protection fault", regs, error_code);
		return;
	}

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_general_protection);

/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
	if (poke_int3_handler(regs))
		return;

	ist_enter(regs);
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	cond_local_irq_disable(regs);
	debug_stack_usage_dec();
exit:
	ist_exit(regs);
}
NOKPROBE_SYMBOL(do_int3);

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch off the IST stack if the
 * interrupted code was in user mode. The actual stack switch is done in
 * entry_64.S
 */
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = task_pt_regs(current);
	*regs = *eregs;
	return regs;
}
NOKPROBE_SYMBOL(sync_regs);

struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};

asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to task_pt_regs
	 * and we want to pretend that the exception came from the
	 * iret target.
	 */
	struct bad_iret_stack *new_stack =
		container_of(task_pt_regs(current),
			     struct bad_iret_stack, regs);

	/* Copy the IRET target to the new stack. */
	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
#endif

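/*
 * The 5*8 bytes moved in fixup_bad_iret() (and in the espfix64 path of
 * do_double_fault()) are the hardware IRET frame: RIP, CS, RFLAGS, RSP
 * and SS, five 8-byte slots pushed by the CPU on a 64-bit exception.
 */
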
static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here.  If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set.  (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}

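/*
 * Both arms rely on the unsigned-compare idiom: since regs->ip and the
 * region bounds are unsigned, (ip - start) < (end - start) is a single
 * range check that also rejects ip < start via wraparound.
 */
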
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	ist_enter(regs);

	get_debugreg(dr6, 6);
	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3. The remaining
	 *   contents of the DR6 register are never cleared by the
	 *   processor. To avoid confusion in identifying debug
	 *   exceptions, debug handlers should clear the register before
	 *   returning to the interrupted task.
	 *
	 * Keep it simple: clear DR6 immediately.
	 */
	set_debugreg(0, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) &&
		     is_sysenter_singlestep(regs))) {
		dr6 &= ~DR_STEP;
		if (!dr6)
			goto exit;
		/*
		 * else we might have gotten a single-step trap and hit a
		 * watchpoint at the same time, in which case we should fall
		 * through and handle the watchpoint.
		 */
	}

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		goto exit;

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
	if (kprobe_debug_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
						SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	cond_local_irq_enable(regs);

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
					X86_TRAP_DB);
		cond_local_irq_disable(regs);
		debug_stack_usage_dec();
		goto exit;
	}

	if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
		/*
		 * Historical junk that used to handle SYSENTER single-stepping.
		 * This should be unreachable now.  If we survive for a while
		 * without anyone hitting this warning, we'll turn this into
		 * a bug.
		 */
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	cond_local_irq_disable(regs);
	debug_stack_usage_dec();

exit:
#if defined(CONFIG_X86_32)
	/*
	 * This is the most likely code path that involves non-trivial use
	 * of the SYSENTER stack.  Check that we haven't overrun it.
	 */
	WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC,
	     "Overran or corrupted SYSENTER stack\n");
#endif
	ist_exit(regs);
}
NOKPROBE_SYMBOL(do_debug);

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	siginfo_t info;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (!fixup_exception(regs, trapnr)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	fpu__save(fpu);

	task->thread.trap_nr	= trapnr;
	task->thread.error_code = error_code;
	info.si_signo		= SIGFPE;
	info.si_errno		= 0;
	info.si_addr		= (void __user *)uprobe_get_trap_addr(regs);

	info.si_code = fpu__exception_code(fpu, trapnr);

	/* Retry when we get spurious exceptions: */
	if (!info.si_code)
		return;

	force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	math_error(regs, error_code, X86_TRAP_MF);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	math_error(regs, error_code, X86_TRAP_XF);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	cond_local_irq_enable(regs);
}

dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
	unsigned long cr0;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);
		return;
	}
#endif

	/* This should not happen. */
	cr0 = read_cr0();
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something terrible happened, and we're better off trying
		 * to kill the task than getting stuck in a never-ending
		 * loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, error_code);
	}
}
NOKPROBE_SYMBOL(do_device_not_available);

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			&info);
	}
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	/*
	 * Don't use IST to set DEBUG_STACK as it doesn't work until TSS
	 * is ready in cpu_init() <-- trap_init(). Before trap_init(),
	 * CPU runs at ring 0 so it is impossible to hit an invalid
	 * stack.  Using the original stack works well enough at this
	 * early stage. DEBUG_STACK will be equipped after cpu_init() in
	 * trap_init().
	 *
	 * We don't need to set trace_idt_table like set_intr_gate(),
	 * since we don't have trace_debug and it will be reset to
	 * 'debug' in trap_init() by set_intr_gate_ist().
	 */
	set_intr_gate_notrace(X86_TRAP_DB, debug);
	/* int3 can be called from all */
	set_system_intr_gate(X86_TRAP_BP, &int3);
#ifdef CONFIG_X86_32
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
	load_idt(&idt_descr);
}

void __init early_trap_pf_init(void)
{
#ifdef CONFIG_X86_64
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
}

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(X86_TRAP_DE, divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, bounds);
	set_intr_gate(X86_TRAP_UD, invalid_op);
	set_intr_gate(X86_TRAP_NM, device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, invalid_TSS);
	set_intr_gate(X86_TRAP_NP, segment_not_present);
	set_intr_gate(X86_TRAP_SS, stack_segment);
	set_intr_gate(X86_TRAP_GP, general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, coprocessor_error);
	set_intr_gate(X86_TRAP_AC, alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_compat);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Set the IDT descriptor to a fixed read-only location, so that the
	 * "sidt" instruction will not leak the location of the kernel, and
	 * to defend the IDT against arbitrary memory write vulnerabilities.
	 * It will be reloaded in cpu_init().
	 */
	__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
	idt_descr.address = fix_to_virt(FIX_RO_IDT);

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	/*
	 * X86_TRAP_DB and X86_TRAP_BP have been set
	 * in early_trap_init(). However, IST works only after
	 * cpu_init() loads TSS. See comments in early_trap_init().
	 */
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);

	x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
	memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}