/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/trace/mpx.h>
#include <asm/mpx.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif

/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[NR_VECTORS] __page_aligned_bss;

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

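/*
 * Helpers that enable/disable interrupts only if the interrupted context
 * had them enabled (regs->flags.IF).  The preempt_* variants additionally
 * bump the preempt count so the section cannot be preempted.
 */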
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	preempt_count_inc();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	preempt_count_dec();
}

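/*
 * Bracket an exception that may run on an IST stack: notify context
 * tracking or RCU depending on what was interrupted, then make the
 * region atomic by raising the hardirq count.
 */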
enum ctx_state ist_enter(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	if (user_mode(regs)) {
		/* Other than that, we're just an exception. */
		prev_state = exception_enter();
	} else {
		/*
		 * We might have interrupted pretty much anything.  In
		 * fact, if we're a machine check, we can even interrupt
		 * NMI processing.  We don't want in_nmi() to return true,
		 * but we need to notify RCU.
		 */
		rcu_nmi_enter();
		prev_state = CONTEXT_KERNEL;  /* the value is irrelevant. */
	}

	/*
	 * We are atomic because we're on the IST stack (or we're on x86_32,
	 * in which case we still shouldn't schedule).
	 *
	 * This must be after exception_enter(), because exception_enter()
	 * won't do anything if in_interrupt() returns true.
	 */
	preempt_count_add(HARDIRQ_OFFSET);

	/* This code is a bit fragile.  Test it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");

	return prev_state;
}

void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
{
	/* Must be before exception_exit. */
	preempt_count_sub(HARDIRQ_OFFSET);

	if (user_mode(regs))
		return exception_exit(prev_state);
	else
		rcu_nmi_exit();
}

/**
 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 * @regs:	regs passed to the IST exception handler
 *
 * IST exception handlers normally cannot schedule.  As a special
 * exception, if the exception interrupted userspace code (i.e.
 * user_mode(regs) would return true) and the exception was not
 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 * Callers are responsible for enabling interrupts themselves inside
 * the non-atomic section, and callers must call ist_end_non_atomic()
 * before ist_exit().
 */
void ist_begin_non_atomic(struct pt_regs *regs)
{
	BUG_ON(!user_mode(regs));

	/*
	 * Sanity check: we need to be on the normal thread stack.  This
	 * will catch asm bugs and any attempt to use ist_preempt_enable
	 * from double_fault.
	 */
	BUG_ON((unsigned long)(current_top_of_stack() -
			       current_stack_pointer()) >= THREAD_SIZE);

	preempt_count_sub(HARDIRQ_OFFSET);
}

/**
 * ist_end_non_atomic() - end a non-atomic section in an IST exception
 *
 * Ends a non-atomic section started with ist_begin_non_atomic().
 */
void ist_end_non_atomic(void)
{
	preempt_count_add(HARDIRQ_OFFSET);
}

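/*
 * Handle the parts of a trap that never raise a signal: vm86 forwarding
 * and kernel-mode faults (exception-table fixup or die()).  Returns 0
 * when fully handled, -1 when the caller should deliver a signal.
 */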
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
		return -1;
	}

	if (!user_mode(regs)) {
		if (!fixup_exception(regs)) {
			tsk->thread.error_code = error_code;
			tsk->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return 0;
	}

	return -1;
}

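/*
 * Build the siginfo for a user-space trap.  Only #DE, #UD and #AC carry
 * decodable details; every other trap returns SEND_SIG_PRIV so that
 * force_sig_info() synthesizes a generic siginfo instead.
 */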
static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
				 siginfo_t *info)
{
	unsigned long siaddr;
	int sicode;

	switch (trapnr) {
	default:
		return SEND_SIG_PRIV;

	case X86_TRAP_DE:
		sicode = FPE_INTDIV;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_UD:
		sicode = ILL_ILLOPN;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_AC:
		sicode = BUS_ADRALN;
		siaddr = 0;
		break;
	}

	info->si_signo = signr;
	info->si_errno = 0;
	info->si_code = sicode;
	info->si_addr = (void __user *)siaddr;
	return info;
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}
#endif

	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);

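/*
 * Common body for the DO_ERROR() handlers below: enter the exception
 * context, let notify_die() consumers veto the trap, then deliver it
 * through do_trap() with a freshly filled siginfo.
 */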
static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		conditional_sti(regs);
		do_trap(trapnr, signr, str, regs, error_code,
			fill_trap_info(regs, signr, trapnr, &info));
	}

	exception_exit(prev_state);
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	do_error_trap(regs, error_code, str, trapnr, signr);		\
}

DO_ERROR(X86_TRAP_DE,     SIGFPE,  "divide error",		divide_error)
DO_ERROR(X86_TRAP_OF,     SIGSEGV, "overflow",			overflow)
DO_ERROR(X86_TRAP_UD,     SIGILL,  "invalid opcode",		invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",		invalid_TSS)
DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",	segment_not_present)
DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",		stack_segment)
DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",		alignment_check)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, modify
	 * the stack to make it look like we just entered the #GP
	 * handler from user space, similar to bad_iret.
	 *
	 * No need for ist_enter here because we don't use RCU.
	 */
	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *normal_regs = task_pt_regs(current);

		/* Fake a #GP(0) from userspace. */
		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
		regs->ip = (unsigned long)general_protection;
		regs->sp = (unsigned long)&normal_regs->orig_ax;

		return;
	}
#endif

	ist_enter(regs);  /* Discard prev_state because we won't return. */
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_DOUBLEFAULT
	df_debug(regs, error_code);
#endif
	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

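/*
 * #BR: raised by the BOUND instruction or by an Intel MPX bounds
 * violation.  MPX faults are decoded via the BNDCSR/BNDSTATUS state;
 * anything else is delivered to user space as a plain SIGSEGV.
 */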
dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;
	const struct bndcsr *bndcsr;
	siginfo_t *info;

	prev_state = exception_enter();
	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		goto exit;
	conditional_sti(regs);

	if (!user_mode(regs))
		die("bounds", regs, error_code);

	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
		/* The exception is not from Intel MPX */
		goto exit_trap;
	}

	/*
	 * We need to look at BNDSTATUS to resolve this exception.
	 * A NULL here might mean that it is in its 'init state',
	 * which is all zeros which indicates MPX was not
	 * responsible for the exception.
	 */
	bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
	if (!bndcsr)
		goto exit_trap;

	trace_bounds_exception_mpx(bndcsr);
	/*
	 * The error code field of the BNDSTATUS register communicates status
	 * information of a bound range exception #BR or operation involving
	 * bound directory.
	 */
	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
	case 2:	/* Bound directory has invalid entry. */
		if (mpx_handle_bd_fault())
			goto exit_trap;
		break; /* Success, it was handled */
	case 1: /* Bound violation. */
		info = mpx_generate_siginfo(regs);
		if (IS_ERR(info)) {
			/*
			 * We failed to decode the MPX instruction.  Act as if
			 * the exception was not caused by MPX.
			 */
			goto exit_trap;
		}
		/*
		 * Success, we decoded the instruction and retrieved
		 * an 'info' containing the address being accessed
		 * which caused the exception.  This information
		 * allows an application to possibly handle the
		 * #BR exception itself.
		 */
		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
		kfree(info);
		break;
	case 0: /* No exception caused by Intel MPX operations. */
		goto exit_trap;
	default:
		die("bounds", regs, error_code);
	}

exit:
	exception_exit(prev_state);
	return;
exit_trap:
	/*
	 * This path out is for all the cases where we could not
	 * handle the exception in some way (like allocating a
	 * table or telling userspace about it).  We will also end
	 * up here if the kernel has MPX turned off at compile
	 * time.
	 */
	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
	exception_exit(prev_state);
}

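/*
 * #GP can arrive from vm86 mode, from the kernel (where an exception
 * table fixup may resolve it), or from user space, where it is
 * delivered as SIGSEGV against the current task.
 */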
dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	conditional_sti(regs);

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		goto exit;
	}

	tsk = current;
	if (!user_mode(regs)) {
		if (fixup_exception(regs))
			goto exit;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die("general protection fault", regs, error_code);
		goto exit;
	}

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
exit:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_general_protection);

/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
	if (poke_int3_handler(regs))
		return;

	prev_state = ist_enter(regs);
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	preempt_conditional_sti(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();
exit:
	ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_int3);

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch off the IST stack if the
 * interrupted code was in user mode. The actual stack switch is done in
 * entry_64.S
 */
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = task_pt_regs(current);
	*regs = *eregs;
	return regs;
}
NOKPROBE_SYMBOL(sync_regs);

struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};

asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to task_pt_regs
	 * and we want to pretend that the exception came from the
	 * iret target.
	 */
	struct bad_iret_stack *new_stack =
		container_of(task_pt_regs(current),
			     struct bad_iret_stack, regs);

	/* Copy the IRET target to the new stack. */
	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	enum ctx_state prev_state;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	prev_state = ist_enter(regs);

	get_debugreg(dr6, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions first of all! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		goto exit;

	/* DR6 may or may not be cleared by the CPU */
	set_debugreg(0, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
	if (kprobe_debug_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
							SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
					X86_TRAP_DB);
		preempt_conditional_cli(regs);
		debug_stack_usage_dec();
		goto exit;
	}

	/*
	 * Single-stepping through system calls: ignore any exceptions in
	 * kernel space, but re-enable TF when returning to user mode.
	 *
	 * We already checked v86 mode above, so we can check for kernel mode
	 * by just checking the CPL of CS.
	 */
	if ((dr6 & DR_STEP) && !user_mode(regs)) {
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();

exit:
	ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_debug);

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	siginfo_t info;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	conditional_sti(regs);

	if (!user_mode(regs)) {
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	fpu__save(fpu);

	task->thread.trap_nr	= trapnr;
	task->thread.error_code = error_code;
	info.si_signo		= SIGFPE;
	info.si_errno		= 0;
	info.si_addr		= (void __user *)uprobe_get_trap_addr(regs);

	info.si_code = fpu__exception_code(fpu, trapnr);

	/* Retry when we get spurious exceptions: */
	if (!info.si_code)
		return;

	force_sig_info(SIGFPE, &info, task);
}

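/* #MF and #XF both funnel into math_error() with their trap number. */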
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_MF);
	exception_exit(prev_state);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_XF);
	exception_exit(prev_state);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
}

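/*
 * #NM: the FPU is not available.  Either emulate the instruction (when
 * built with math emulation and CR0.EM is set) or restore the task's
 * FPU state.  Eager-FPU kernels should never see this trap at all,
 * hence the BUG_ON().
 */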
dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	BUG_ON(use_eager_fpu());

#ifdef CONFIG_MATH_EMULATION
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
		exception_exit(prev_state);
		return;
	}
#endif
	fpu__restore(&current->thread.fpu); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_device_not_available);

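/*
 * Faulting iret on x86_32: reported as X86_TRAP_IRET and delivered to
 * the task as SIGILL/ILL_BADSTK.
 */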
#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			&info);
	}
	exception_exit(prev_state);
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	/*
	 * Don't use IST to set DEBUG_STACK as it doesn't work until TSS
	 * is ready in cpu_init() <-- trap_init(). Before trap_init(),
	 * CPU runs at ring 0 so it is impossible to hit an invalid
	 * stack.  Using the original stack works well enough at this
	 * early stage.  DEBUG_STACK will be equipped after cpu_init() in
	 * trap_init().
	 *
	 * We don't need to set trace_idt_table like set_intr_gate(),
	 * since we don't have trace_debug and it will be reset to
	 * 'debug' in trap_init() by set_intr_gate_ist().
	 */
	set_intr_gate_notrace(X86_TRAP_DB, debug);
	/* int3 can be called from all */
	set_system_intr_gate(X86_TRAP_BP, &int3);
#ifdef CONFIG_X86_32
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
	load_idt(&idt_descr);
}

void __init early_trap_pf_init(void)
{
#ifdef CONFIG_X86_64
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
}

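/*
 * Final IDT setup: install every architectural trap gate, reserve the
 * built-in vectors, wire up the legacy int 0x80 syscall entry, and map
 * the IDT read-only through the fixmap.
 */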
void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(X86_TRAP_DE, divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, bounds);
	set_intr_gate(X86_TRAP_UD, invalid_op);
	set_intr_gate(X86_TRAP_NM, device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, invalid_TSS);
	set_intr_gate(X86_TRAP_NP, segment_not_present);
	set_intr_gate(X86_TRAP_SS, stack_segment);
	set_intr_gate(X86_TRAP_GP, general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, coprocessor_error);
	set_intr_gate(X86_TRAP_AC, alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_compat);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_trap_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Set the IDT descriptor to a fixed read-only location, so that the
	 * "sidt" instruction will not leak the location of the kernel, and
	 * to defend the IDT against arbitrary memory write vulnerabilities.
	 * It will be reloaded in cpu_init().
	 */
	__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
	idt_descr.address = fix_to_virt(FIX_RO_IDT);

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	/*
	 * X86_TRAP_DB and X86_TRAP_BP have been set
	 * in early_trap_init(). However, IST works only after
	 * cpu_init() loads TSS. See comments in early_trap_init().
	 */
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);

	x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
	memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}