Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
1da177e4 | 2 | * Copyright (C) 1991, 1992 Linus Torvalds |
a8c1be9d | 3 | * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs |
1da177e4 LT | 4 | * |
5 | * Pentium III FXSR, SSE support | |
6 | * Gareth Hughes <gareth@valinux.com>, May 2000 | |
7 | */ | |
8 | ||
9 | /* | |
c1d518c8 | 10 | * Handle hardware traps and faults. |
1da177e4 | 11 | */ |
c767a54b JP | 12 | |
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
14 | ||
56dd9470 | 15 | #include <linux/context_tracking.h> |
b5964405 IM | 16 | #include <linux/interrupt.h> |
17 | #include <linux/kallsyms.h> | |
18 | #include <linux/spinlock.h> | |
b5964405 IM | 19 | #include <linux/kprobes.h> |
20 | #include <linux/uaccess.h> | |
b5964405 | 21 | #include <linux/kdebug.h> |
f503b5ae | 22 | #include <linux/kgdb.h> |
1da177e4 | 23 | #include <linux/kernel.h> |
b5964405 IM | 24 | #include <linux/module.h> |
25 | #include <linux/ptrace.h> | |
b02ef20a | 26 | #include <linux/uprobes.h> |
1da177e4 | 27 | #include <linux/string.h> |
b5964405 | 28 | #include <linux/delay.h> |
1da177e4 | 29 | #include <linux/errno.h> |
b5964405 IM | 30 | #include <linux/kexec.h> |
31 | #include <linux/sched.h> | |
1da177e4 | 32 | #include <linux/timer.h> |
1da177e4 | 33 | #include <linux/init.h> |
91768d6c | 34 | #include <linux/bug.h> |
b5964405 IM | 35 | #include <linux/nmi.h> |
36 | #include <linux/mm.h> | |
c1d518c8 AH | 37 | #include <linux/smp.h> |
38 | #include <linux/io.h> | |
1da177e4 LT | 39 | |
40 | #ifdef CONFIG_EISA | |
41 | #include <linux/ioport.h> | |
42 | #include <linux/eisa.h> | |
43 | #endif | |
44 | ||
c0d12172 DJ | 45 | #if defined(CONFIG_EDAC) |
46 | #include <linux/edac.h> | |
47 | #endif | |
48 | ||
f8561296 | 49 | #include <asm/kmemcheck.h> |
b5964405 | 50 | #include <asm/stacktrace.h> |
1da177e4 | 51 | #include <asm/processor.h> |
1da177e4 | 52 | #include <asm/debugreg.h> |
60063497 | 53 | #include <linux/atomic.h> |
08d636b6 | 54 | #include <asm/ftrace.h> |
c1d518c8 | 55 | #include <asm/traps.h> |
1da177e4 LT | 56 | #include <asm/desc.h> |
57 | #include <asm/i387.h> | |
1361b83a | 58 | #include <asm/fpu-internal.h> |
9e55e44e | 59 | #include <asm/mce.h> |
4eefbe79 | 60 | #include <asm/fixmap.h> |
1164dd00 | 61 | #include <asm/mach_traps.h> |
17f41571 | 62 | #include <asm/alternative.h> |
fe3d197f | 63 | #include <asm/mpx.h> |
c1d518c8 | 64 | |
081f75bb | 65 | #ifdef CONFIG_X86_64 |
428cf902 | 66 | #include <asm/x86_init.h> |
081f75bb AH | 67 | #include <asm/pgalloc.h> |
68 | #include <asm/proto.h> | |
4df05f36 KC | 69 | |
70 | /* No need to be aligned, but done to keep all IDTs defined the same way. */ | |
71 | gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss; | |
081f75bb | 72 | #else |
c1d518c8 | 73 | #include <asm/processor-flags.h> |
8e6dafd6 | 74 | #include <asm/setup.h> |
1da177e4 | 75 | |
1da177e4 | 76 | asmlinkage int system_call(void); |
081f75bb | 77 | #endif |
1da177e4 | 78 | |
4df05f36 KC | 79 | /* Must be page-aligned because the real IDT is used in a fixmap. */ |
80 | gate_desc idt_table[NR_VECTORS] __page_aligned_bss; | |
81 | ||
b77b881f YL | 82 | DECLARE_BITMAP(used_vectors, NR_VECTORS); |
83 | EXPORT_SYMBOL_GPL(used_vectors); | |
84 | ||
762db434 AH | 85 | static inline void conditional_sti(struct pt_regs *regs) |
86 | { | |
87 | if (regs->flags & X86_EFLAGS_IF) | |
88 | local_irq_enable(); | |
89 | } | |
90 | ||
3d2a71a5 AH | 91 | static inline void preempt_conditional_sti(struct pt_regs *regs) |
92 | { | |
bdb43806 | 93 | preempt_count_inc(); |
3d2a71a5 AH | 94 | if (regs->flags & X86_EFLAGS_IF) |
95 | local_irq_enable(); | |
96 | } | |
97 | ||
be716615 TG | 98 | static inline void conditional_cli(struct pt_regs *regs) |
99 | { | |
100 | if (regs->flags & X86_EFLAGS_IF) | |
101 | local_irq_disable(); | |
102 | } | |
103 | ||
3d2a71a5 AH | 104 | static inline void preempt_conditional_cli(struct pt_regs *regs) |
105 | { | |
106 | if (regs->flags & X86_EFLAGS_IF) | |
107 | local_irq_disable(); | |
bdb43806 | 108 | preempt_count_dec(); |
3d2a71a5 AH | 109 | } |
110 | ||
95927475 AL | 111 | enum ctx_state ist_enter(struct pt_regs *regs) |
112 | { | |
b926e6f6 | 113 | enum ctx_state prev_state; |
95927475 | 114 | |
f39b6f0e | 115 | if (user_mode(regs)) { |
95927475 | 116 | /* Other than that, we're just an exception. */ |
b926e6f6 | 117 | prev_state = exception_enter(); |
95927475 AL | 118 | } else { |
119 | /* | |
120 | * We might have interrupted pretty much anything. In | |
121 | * fact, if we're a machine check, we can even interrupt | |
122 | * NMI processing. We don't want in_nmi() to return true, | |
123 | * but we need to notify RCU. | |
124 | */ | |
125 | rcu_nmi_enter(); | |
c467ea76 | 126 | prev_state = CONTEXT_KERNEL; /* the value is irrelevant. */ |
95927475 | 127 | } |
b926e6f6 AL | 128 | |
129 | /* | |
130 | * We are atomic because we're on the IST stack (or we're on x86_32, | |
131 | * in which case we still shouldn't schedule). | |
132 | * | |
133 | * This must be after exception_enter(), because exception_enter() | |
134 | * won't do anything if in_interrupt() returns true. | |
135 | */ | |
136 | preempt_count_add(HARDIRQ_OFFSET); | |
137 | ||
138 | /* This code is a bit fragile. Test it. */ | |
139 | rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work"); | |
140 | ||
141 | return prev_state; | |
95927475 AL | 142 | } |
143 | ||
144 | void ist_exit(struct pt_regs *regs, enum ctx_state prev_state) | |
145 | { | |
b926e6f6 | 146 | /* Must be before exception_exit. */ |
95927475 AL | 147 | preempt_count_sub(HARDIRQ_OFFSET); |
148 | ||
f39b6f0e | 149 | if (user_mode(regs)) |
95927475 AL | 150 | return exception_exit(prev_state); |
151 | else | |
152 | rcu_nmi_exit(); | |
153 | } | |
154 | ||
bced35b6 AL | 155 | /** |
156 | * ist_begin_non_atomic() - begin a non-atomic section in an IST exception | |
157 | * @regs: regs passed to the IST exception handler | |
158 | * | |
159 | * IST exception handlers normally cannot schedule. As a special | |
160 | * exception, if the exception interrupted userspace code (i.e. | |
f39b6f0e | 161 | * user_mode(regs) would return true) and the exception was not |
bced35b6 AL | 162 | * a double fault, it can be safe to schedule. ist_begin_non_atomic() |
163 | * begins a non-atomic section within an ist_enter()/ist_exit() region. | |
164 | * Callers are responsible for enabling interrupts themselves inside | |
165 | * the non-atomic section, and callers must call ist_end_non_atomic() | |
166 | * before ist_exit(). | |
167 | */ | |
168 | void ist_begin_non_atomic(struct pt_regs *regs) | |
169 | { | |
f39b6f0e | 170 | BUG_ON(!user_mode(regs)); |
bced35b6 AL | 171 | |
172 | /* | |
173 | * Sanity check: we need to be on the normal thread stack. This | |
174 | * will catch asm bugs and any attempt to use ist_preempt_enable | |
175 | * from double_fault. | |
176 | */ | |
a7fcf28d AL | 177 | BUG_ON((unsigned long)(current_top_of_stack() - |
178 | current_stack_pointer()) >= THREAD_SIZE); | |
bced35b6 AL | 179 | |
180 | preempt_count_sub(HARDIRQ_OFFSET); | |
181 | } | |
182 | ||
183 | /** | |
184 | * ist_end_non_atomic() - end a non-atomic section in an IST exception | |
185 | * | |
186 | * Ends a non-atomic section started with ist_begin_non_atomic(). | |
187 | */ | |
188 | void ist_end_non_atomic(void) | |
189 | { | |
190 | preempt_count_add(HARDIRQ_OFFSET); | |
191 | } | |
192 | ||
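
Taken together, ist_enter()/ist_exit() and ist_begin_non_atomic()/ist_end_non_atomic() impose a strict nesting on IST exception handlers. A rough sketch of the intended call sequence follows, using a hypothetical handler name purely for illustration; as the comments above note, enabling and disabling interrupts inside the non-atomic section is the caller's own responsibility.

```c
/* Hypothetical IST handler skeleton -- not a handler defined in this file. */
dotraplinkage void do_example_ist(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state = ist_enter(regs);

	/* ... atomic work: no scheduling allowed here ... */

	if (user_mode(regs)) {
		/* Safe only because the exception came from user space. */
		ist_begin_non_atomic(regs);
		local_irq_enable();

		/* ... work that may sleep, e.g. delivering a signal ... */

		local_irq_disable();
		ist_end_non_atomic();
	}

	ist_exit(regs, prev_state);
}
```
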
9326638c | 193 | static nokprobe_inline int |
c416ddf5 FW | 194 | do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, |
195 | struct pt_regs *regs, long error_code) | |
1da177e4 | 196 | { |
d74ef111 | 197 | if (v8086_mode(regs)) { |
3c1326f8 | 198 | /* |
c416ddf5 | 199 | * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86. |
3c1326f8 AH |
200 | * On nmi (interrupt 2), do_trap should not be called. |
201 | */ | |
c416ddf5 FW |
202 | if (trapnr < X86_TRAP_UD) { |
203 | if (!handle_vm86_trap((struct kernel_vm86_regs *) regs, | |
204 | error_code, trapnr)) | |
205 | return 0; | |
206 | } | |
207 | return -1; | |
1da177e4 | 208 | } |
d74ef111 | 209 | |
55474c48 | 210 | if (!user_mode(regs)) { |
c416ddf5 FW |
211 | if (!fixup_exception(regs)) { |
212 | tsk->thread.error_code = error_code; | |
213 | tsk->thread.trap_nr = trapnr; | |
214 | die(str, regs, error_code); | |
215 | } | |
216 | return 0; | |
217 | } | |
1da177e4 | 218 | |
c416ddf5 FW |
219 | return -1; |
220 | } | |
1da177e4 | 221 | |
1c326c4d ON |
222 | static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr, |
223 | siginfo_t *info) | |
958d3d72 ON |
224 | { |
225 | unsigned long siaddr; | |
226 | int sicode; | |
227 | ||
228 | switch (trapnr) { | |
1c326c4d ON |
229 | default: |
230 | return SEND_SIG_PRIV; | |
231 | ||
958d3d72 ON |
232 | case X86_TRAP_DE: |
233 | sicode = FPE_INTDIV; | |
b02ef20a | 234 | siaddr = uprobe_get_trap_addr(regs); |
958d3d72 ON |
235 | break; |
236 | case X86_TRAP_UD: | |
237 | sicode = ILL_ILLOPN; | |
b02ef20a | 238 | siaddr = uprobe_get_trap_addr(regs); |
958d3d72 ON |
239 | break; |
240 | case X86_TRAP_AC: | |
241 | sicode = BUS_ADRALN; | |
242 | siaddr = 0; | |
243 | break; | |
244 | } | |
245 | ||
246 | info->si_signo = signr; | |
247 | info->si_errno = 0; | |
248 | info->si_code = sicode; | |
249 | info->si_addr = (void __user *)siaddr; | |
1c326c4d | 250 | return info; |
958d3d72 ON |
251 | } |
252 | ||
9326638c | 253 | static void |
c416ddf5 FW |
254 | do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, |
255 | long error_code, siginfo_t *info) | |
256 | { | |
257 | struct task_struct *tsk = current; | |
258 | ||
259 | ||
260 | if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code)) | |
261 | return; | |
b5964405 | 262 | /* |
51e7dc70 | 263 | * We want error_code and trap_nr set for userspace faults and |
b5964405 IM |
264 | * kernelspace faults which result in die(), but not |
265 | * kernelspace faults which are fixed up. die() gives the | |
266 | * process no chance to handle the signal and notice the | |
267 | * kernel fault information, so that won't result in polluting | |
268 | * the information about previously queued, but not yet | |
269 | * delivered, faults. See also do_general_protection below. | |
270 | */ | |
271 | tsk->thread.error_code = error_code; | |
51e7dc70 | 272 | tsk->thread.trap_nr = trapnr; |
d1895183 | 273 | |
081f75bb AH |
274 | #ifdef CONFIG_X86_64 |
275 | if (show_unhandled_signals && unhandled_signal(tsk, signr) && | |
276 | printk_ratelimit()) { | |
c767a54b JP |
277 | pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx", |
278 | tsk->comm, tsk->pid, str, | |
279 | regs->ip, regs->sp, error_code); | |
081f75bb | 280 | print_vma_addr(" in ", regs->ip); |
c767a54b | 281 | pr_cont("\n"); |
081f75bb AH |
282 | } |
283 | #endif | |
284 | ||
38cad57b | 285 | force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk); |
1da177e4 | 286 | } |
9326638c | 287 | NOKPROBE_SYMBOL(do_trap); |
1da177e4 | 288 | |
dff0796e | 289 | static void do_error_trap(struct pt_regs *regs, long error_code, char *str, |
1c326c4d | 290 | unsigned long trapnr, int signr) |
dff0796e ON |
291 | { |
292 | enum ctx_state prev_state = exception_enter(); | |
1c326c4d | 293 | siginfo_t info; |
dff0796e ON |
294 | |
295 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != | |
296 | NOTIFY_STOP) { | |
297 | conditional_sti(regs); | |
1c326c4d ON |
298 | do_trap(trapnr, signr, str, regs, error_code, |
299 | fill_trap_info(regs, signr, trapnr, &info)); | |
dff0796e ON |
300 | } |
301 | ||
302 | exception_exit(prev_state); | |
303 | } | |
304 | ||
b5964405 | 305 | #define DO_ERROR(trapnr, signr, str, name) \ |
e407d620 | 306 | dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ |
b5964405 | 307 | { \ |
1c326c4d | 308 | do_error_trap(regs, error_code, str, trapnr, signr); \ |
1da177e4 LT | 309 | } |
310 | ||
0eb14833 ON | 311 | DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error) |
312 | DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow) | |
0eb14833 ON |
313 | DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op) |
314 | DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",coprocessor_segment_overrun) | |
315 | DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) | |
316 | DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) | |
0eb14833 | 317 | DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) |
0eb14833 | 318 | DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check) |
1da177e4 | 319 | |
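
Each DO_ERROR() line above stamps out a trivial trap handler that funnels into do_error_trap(); the first entry, for example, expands to roughly the following:

```c
/* Approximate expansion of DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error) */
dotraplinkage void do_divide_error(struct pt_regs *regs, long error_code)
{
	do_error_trap(regs, error_code, "divide error", X86_TRAP_DE, SIGFPE);
}
```
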
081f75bb AH |
320 | #ifdef CONFIG_X86_64 |
321 | /* Runs on IST stack */ | |
081f75bb AH |
322 | dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) |
323 | { | |
324 | static const char str[] = "double fault"; | |
325 | struct task_struct *tsk = current; | |
326 | ||
af726f21 AL |
327 | #ifdef CONFIG_X86_ESPFIX64 |
328 | extern unsigned char native_irq_return_iret[]; | |
329 | ||
330 | /* | |
331 | * If IRET takes a non-IST fault on the espfix64 stack, then we | |
332 | * end up promoting it to a doublefault. In that case, modify | |
333 | * the stack to make it look like we just entered the #GP | |
334 | * handler from user space, similar to bad_iret. | |
95927475 AL |
335 | * |
336 | * No need for ist_enter here because we don't use RCU. | |
af726f21 AL |
337 | */ |
338 | if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY && | |
339 | regs->cs == __KERNEL_CS && | |
340 | regs->ip == (unsigned long)native_irq_return_iret) | |
341 | { | |
342 | struct pt_regs *normal_regs = task_pt_regs(current); | |
343 | ||
344 | /* Fake a #GP(0) from userspace. */ | |
345 | memmove(&normal_regs->ip, (void *)regs->sp, 5*8); | |
346 | normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */ | |
347 | regs->ip = (unsigned long)general_protection; | |
348 | regs->sp = (unsigned long)&normal_regs->orig_ax; | |
95927475 | 349 | |
af726f21 AL |
350 | return; |
351 | } | |
352 | #endif | |
353 | ||
95927475 | 354 | ist_enter(regs); /* Discard prev_state because we won't return. */ |
c9408265 | 355 | notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); |
081f75bb AH |
356 | |
357 | tsk->thread.error_code = error_code; | |
51e7dc70 | 358 | tsk->thread.trap_nr = X86_TRAP_DF; |
081f75bb | 359 | |
4d067d8e BP |
360 | #ifdef CONFIG_DOUBLEFAULT |
361 | df_debug(regs, error_code); | |
362 | #endif | |
bd8b96df IM |
363 | /* |
364 | * This is always a kernel trap and never fixable (and thus must | |
365 | * never return). | |
366 | */ | |
081f75bb AH |
367 | for (;;) |
368 | die(str, regs, error_code); | |
369 | } | |
370 | #endif | |
371 | ||
fe3d197f DH |
372 | dotraplinkage void do_bounds(struct pt_regs *regs, long error_code) |
373 | { | |
374 | struct task_struct *tsk = current; | |
375 | struct xsave_struct *xsave_buf; | |
376 | enum ctx_state prev_state; | |
377 | struct bndcsr *bndcsr; | |
378 | siginfo_t *info; | |
379 | ||
380 | prev_state = exception_enter(); | |
381 | if (notify_die(DIE_TRAP, "bounds", regs, error_code, | |
382 | X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP) | |
383 | goto exit; | |
384 | conditional_sti(regs); | |
385 | ||
f39b6f0e | 386 | if (!user_mode(regs)) |
fe3d197f DH |
387 | die("bounds", regs, error_code); |
388 | ||
389 | if (!cpu_feature_enabled(X86_FEATURE_MPX)) { | |
390 | /* The exception is not from Intel MPX */ | |
391 | goto exit_trap; | |
392 | } | |
393 | ||
394 | /* | |
395 | * We need to look at BNDSTATUS to resolve this exception. | |
396 | * It is not directly accessible, though, so we need to | |
397 | * do an xsave and then pull it out of the xsave buffer. | |
398 | */ | |
399 | fpu_save_init(&tsk->thread.fpu); | |
400 | xsave_buf = &(tsk->thread.fpu.state->xsave); | |
401 | bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR); | |
402 | if (!bndcsr) | |
403 | goto exit_trap; | |
404 | ||
405 | /* | |
406 | * The error code field of the BNDSTATUS register communicates status | |
407 | * information of a bound range exception #BR or operation involving | |
408 | * bound directory. | |
409 | */ | |
410 | switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) { | |
411 | case 2: /* Bound directory has invalid entry. */ | |
412 | if (mpx_handle_bd_fault(xsave_buf)) | |
413 | goto exit_trap; | |
414 | break; /* Success, it was handled */ | |
415 | case 1: /* Bound violation. */ | |
416 | info = mpx_generate_siginfo(regs, xsave_buf); | |
e10abb2f | 417 | if (IS_ERR(info)) { |
fe3d197f DH |
418 | /* |
419 | * We failed to decode the MPX instruction. Act as if | |
420 | * the exception was not caused by MPX. | |
421 | */ | |
422 | goto exit_trap; | |
423 | } | |
424 | /* | |
425 | * Success, we decoded the instruction and retrieved | |
426 | * an 'info' containing the address being accessed | |
427 | * which caused the exception. This information | |
428 | * allows an application to possibly handle the | |
429 | * #BR exception itself. | |
430 | */ | |
431 | do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info); | |
432 | kfree(info); | |
433 | break; | |
434 | case 0: /* No exception caused by Intel MPX operations. */ | |
435 | goto exit_trap; | |
436 | default: | |
437 | die("bounds", regs, error_code); | |
438 | } | |
439 | ||
440 | exit: | |
441 | exception_exit(prev_state); | |
442 | return; | |
443 | exit_trap: | |
444 | /* | |
445 | * This path out is for all the cases where we could not | |
446 | * handle the exception in some way (like allocating a | |
447 | * table or telling userspace about it). We will also end | |
448 | * up here if the kernel has MPX turned off at compile | |
449 | * time. | |
450 | */ | |
451 | do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL); | |
452 | exception_exit(prev_state); | |
453 | } | |
454 | ||
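
The switch above keys off the low error-code bits of BNDSTATUS. A self-contained sketch of that decode is below; MPX_BNDSTA_ERROR_CODE is redefined locally as 0x3, which is an assumption about the header value rather than something shown in this file.

```c
#include <stdint.h>
#include <stdio.h>

#define MPX_BNDSTA_ERROR_CODE 0x3ULL	/* assumed mask: low two bits of BNDSTATUS */

/* Mirrors the #BR decode in do_bounds(): 0 = not MPX, 1 = bound violation,
 * 2 = invalid bound-directory entry; anything else is treated as fatal there. */
static const char *bndstatus_reason(uint64_t bndstatus)
{
	switch (bndstatus & MPX_BNDSTA_ERROR_CODE) {
	case 0: return "not caused by MPX";
	case 1: return "bound range violation";
	case 2: return "invalid bound directory entry";
	default: return "reserved";
	}
}

int main(void)
{
	printf("%s\n", bndstatus_reason(0x1));	/* bound range violation */
	return 0;
}
```
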
9326638c | 455 | dotraplinkage void |
13485ab5 | 456 | do_general_protection(struct pt_regs *regs, long error_code) |
1da177e4 | 457 | { |
13485ab5 | 458 | struct task_struct *tsk; |
6c1e0256 | 459 | enum ctx_state prev_state; |
b5964405 | 460 | |
6c1e0256 | 461 | prev_state = exception_enter(); |
c6df0d71 AH |
462 | conditional_sti(regs); |
463 | ||
d74ef111 | 464 | if (v8086_mode(regs)) { |
ef3f6288 FW |
465 | local_irq_enable(); |
466 | handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); | |
6ba3c97a | 467 | goto exit; |
ef3f6288 | 468 | } |
1da177e4 | 469 | |
13485ab5 | 470 | tsk = current; |
55474c48 | 471 | if (!user_mode(regs)) { |
ef3f6288 | 472 | if (fixup_exception(regs)) |
6ba3c97a | 473 | goto exit; |
ef3f6288 FW |
474 | |
475 | tsk->thread.error_code = error_code; | |
476 | tsk->thread.trap_nr = X86_TRAP_GP; | |
6ba3c97a FW |
477 | if (notify_die(DIE_GPF, "general protection fault", regs, error_code, |
478 | X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) | |
ef3f6288 | 479 | die("general protection fault", regs, error_code); |
6ba3c97a | 480 | goto exit; |
ef3f6288 | 481 | } |
1da177e4 | 482 | |
13485ab5 | 483 | tsk->thread.error_code = error_code; |
51e7dc70 | 484 | tsk->thread.trap_nr = X86_TRAP_GP; |
b5964405 | 485 | |
13485ab5 AH |
486 | if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && |
487 | printk_ratelimit()) { | |
c767a54b | 488 | pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx", |
13485ab5 AH |
489 | tsk->comm, task_pid_nr(tsk), |
490 | regs->ip, regs->sp, error_code); | |
03252919 | 491 | print_vma_addr(" in ", regs->ip); |
c767a54b | 492 | pr_cont("\n"); |
03252919 | 493 | } |
abd4f750 | 494 | |
38cad57b | 495 | force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk); |
6ba3c97a | 496 | exit: |
6c1e0256 | 497 | exception_exit(prev_state); |
1da177e4 | 498 | } |
9326638c | 499 | NOKPROBE_SYMBOL(do_general_protection); |
1da177e4 | 500 | |
c1d518c8 | 501 | /* May run on IST stack. */ |
9326638c | 502 | dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) |
1da177e4 | 503 | { |
6c1e0256 FW |
504 | enum ctx_state prev_state; |
505 | ||
08d636b6 | 506 | #ifdef CONFIG_DYNAMIC_FTRACE |
a192cd04 SR |
507 | /* |
508 | * ftrace must be first, everything else may cause a recursive crash. | |
509 | * See note by declaration of modifying_ftrace_code in ftrace.c | |
510 | */ | |
511 | if (unlikely(atomic_read(&modifying_ftrace_code)) && | |
512 | ftrace_int3_handler(regs)) | |
08d636b6 SR |
513 | return; |
514 | #endif | |
17f41571 JK |
515 | if (poke_int3_handler(regs)) |
516 | return; | |
517 | ||
95927475 | 518 | prev_state = ist_enter(regs); |
f503b5ae | 519 | #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP |
c9408265 KC |
520 | if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, |
521 | SIGTRAP) == NOTIFY_STOP) | |
6ba3c97a | 522 | goto exit; |
f503b5ae | 523 | #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ |
cc3a1bf5 | 524 | |
6f6343f5 MH |
525 | #ifdef CONFIG_KPROBES |
526 | if (kprobe_int3_handler(regs)) | |
4cdf77a8 | 527 | goto exit; |
6f6343f5 MH |
528 | #endif |
529 | ||
c9408265 KC |
530 | if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, |
531 | SIGTRAP) == NOTIFY_STOP) | |
6ba3c97a | 532 | goto exit; |
b5964405 | 533 | |
42181186 SR |
534 | /* |
535 | * Let others (NMI) know that the debug stack is in use | |
536 | * as we may switch to the interrupt stack. | |
537 | */ | |
538 | debug_stack_usage_inc(); | |
4915a35e | 539 | preempt_conditional_sti(regs); |
c9408265 | 540 | do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); |
4915a35e | 541 | preempt_conditional_cli(regs); |
42181186 | 542 | debug_stack_usage_dec(); |
6ba3c97a | 543 | exit: |
95927475 | 544 | ist_exit(regs, prev_state); |
1da177e4 | 545 | } |
9326638c | 546 | NOKPROBE_SYMBOL(do_int3); |
1da177e4 | 547 | |
081f75bb | 548 | #ifdef CONFIG_X86_64 |
bd8b96df | 549 | /* |
48e08d0f AL |
550 | * Help handler running on IST stack to switch off the IST stack if the |
551 | * interrupted code was in user mode. The actual stack switch is done in | |
552 | * entry_64.S | |
bd8b96df | 553 | */ |
7ddc6a21 | 554 | asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs) |
081f75bb | 555 | { |
48e08d0f AL |
556 | struct pt_regs *regs = task_pt_regs(current); |
557 | *regs = *eregs; | |
081f75bb AH |
558 | return regs; |
559 | } | |
9326638c | 560 | NOKPROBE_SYMBOL(sync_regs); |
b645af2d AL |
561 | |
562 | struct bad_iret_stack { | |
563 | void *error_entry_ret; | |
564 | struct pt_regs regs; | |
565 | }; | |
566 | ||
7ddc6a21 | 567 | asmlinkage __visible notrace |
b645af2d AL |
568 | struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s) |
569 | { | |
570 | /* | |
571 | * This is called from entry_64.S early in handling a fault | |
572 | * caused by a bad iret to user mode. To handle the fault | |
573 | * correctly, we want to move our stack frame to task_pt_regs | |
574 | * and we want to pretend that the exception came from the | |
575 | * iret target. | |
576 | */ | |
577 | struct bad_iret_stack *new_stack = | |
578 | container_of(task_pt_regs(current), | |
579 | struct bad_iret_stack, regs); | |
580 | ||
581 | /* Copy the IRET target to the new stack. */ | |
582 | memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8); | |
583 | ||
584 | /* Copy the remainder of the stack from the current stack. */ | |
585 | memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip)); | |
586 | ||
f39b6f0e | 587 | BUG_ON(!user_mode(&new_stack->regs)); |
b645af2d AL |
588 | return new_stack; |
589 | } | |
7ddc6a21 | 590 | NOKPROBE_SYMBOL(fixup_bad_iret); |
081f75bb AH |
591 | #endif |
592 | ||
1da177e4 LT |
593 | /* |
594 | * Our handling of the processor debug registers is non-trivial. | |
595 | * We do not clear them on entry and exit from the kernel. Therefore | |
596 | * it is possible to get a watchpoint trap here from inside the kernel. | |
597 | * However, the code in ./ptrace.c has ensured that the user can | |
598 | * only set watchpoints on userspace addresses. Therefore the in-kernel | |
599 | * watchpoint trap can only occur in code which is reading/writing | |
600 | * from user space. Such code must not hold kernel locks (since it | |
601 | * can equally take a page fault), therefore it is safe to call | |
602 | * force_sig_info even though that claims and releases locks. | |
b5964405 | 603 | * |
1da177e4 LT |
604 | * Code in ./signal.c ensures that the debug control register |
605 | * is restored before we deliver any signal, and therefore that | |
606 | * user code runs with the correct debug control register even though | |
607 | * we clear it here. | |
608 | * | |
609 | * Being careful here means that we don't have to be as careful in a | |
610 | * lot of more complicated places (task switching can be a bit lazy | |
611 | * about restoring all the debug state, and ptrace doesn't have to | |
612 | * find every occurrence of the TF bit that could be saved away even | |
613 | * by user code) | |
c1d518c8 AH |
614 | * |
615 | * May run on IST stack. | |
1da177e4 | 616 | */ |
9326638c | 617 | dotraplinkage void do_debug(struct pt_regs *regs, long error_code) |
1da177e4 | 618 | { |
1da177e4 | 619 | struct task_struct *tsk = current; |
6c1e0256 | 620 | enum ctx_state prev_state; |
a1e80faf | 621 | int user_icebp = 0; |
08d68323 | 622 | unsigned long dr6; |
da654b74 | 623 | int si_code; |
1da177e4 | 624 | |
95927475 | 625 | prev_state = ist_enter(regs); |
4cdf77a8 | 626 | |
08d68323 | 627 | get_debugreg(dr6, 6); |
1da177e4 | 628 | |
40f9249a P |
629 | /* Filter out all the reserved bits which are preset to 1 */ |
630 | dr6 &= ~DR6_RESERVED; | |
631 | ||
a1e80faf FW |
632 | /* |
633 | * If dr6 has no reason to give us about the origin of this trap, | |
634 | * then it's very likely the result of an icebp/int01 trap. | |
635 | * User wants a sigtrap for that. | |
636 | */ | |
f39b6f0e | 637 | if (!dr6 && user_mode(regs)) |
a1e80faf FW |
638 | user_icebp = 1; |
639 | ||
f8561296 | 640 | /* Catch kmemcheck conditions first of all! */ |
eadb8a09 | 641 | if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) |
6ba3c97a | 642 | goto exit; |
f8561296 | 643 | |
08d68323 P |
644 | /* DR6 may or may not be cleared by the CPU */ |
645 | set_debugreg(0, 6); | |
10faa81e | 646 | |
ea8e61b7 PZ |
647 | /* |
648 | * The processor cleared BTF, so don't mark that we need it set. | |
649 | */ | |
650 | clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP); | |
651 | ||
08d68323 P |
652 | /* Store the virtualized DR6 value */ |
653 | tsk->thread.debugreg6 = dr6; | |
654 | ||
6f6343f5 MH |
655 | #ifdef CONFIG_KPROBES |
656 | if (kprobe_debug_handler(regs)) | |
657 | goto exit; | |
658 | #endif | |
659 | ||
5a802e15 | 660 | if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code, |
62edab90 | 661 | SIGTRAP) == NOTIFY_STOP) |
6ba3c97a | 662 | goto exit; |
3d2a71a5 | 663 | |
42181186 SR |
664 | /* |
665 | * Let others (NMI) know that the debug stack is in use | |
666 | * as we may switch to the interrupt stack. | |
667 | */ | |
668 | debug_stack_usage_inc(); | |
669 | ||
1da177e4 | 670 | /* It's safe to allow irq's after DR6 has been saved */ |
3d2a71a5 | 671 | preempt_conditional_sti(regs); |
1da177e4 | 672 | |
d74ef111 | 673 | if (v8086_mode(regs)) { |
c9408265 KC |
674 | handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, |
675 | X86_TRAP_DB); | |
6554287b | 676 | preempt_conditional_cli(regs); |
42181186 | 677 | debug_stack_usage_dec(); |
6ba3c97a | 678 | goto exit; |
1da177e4 LT |
679 | } |
680 | ||
1da177e4 | 681 | /* |
08d68323 P |
682 | * Single-stepping through system calls: ignore any exceptions in |
683 | * kernel space, but re-enable TF when returning to user mode. | |
684 | * | |
685 | * We already checked v86 mode above, so we can check for kernel mode | |
686 | * by just checking the CPL of CS. | |
1da177e4 | 687 | */ |
55474c48 | 688 | if ((dr6 & DR_STEP) && !user_mode(regs)) { |
08d68323 P |
689 | tsk->thread.debugreg6 &= ~DR_STEP; |
690 | set_tsk_thread_flag(tsk, TIF_SINGLESTEP); | |
691 | regs->flags &= ~X86_EFLAGS_TF; | |
1da177e4 | 692 | } |
08d68323 | 693 | si_code = get_si_code(tsk->thread.debugreg6); |
a1e80faf | 694 | if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) |
08d68323 | 695 | send_sigtrap(tsk, regs, error_code, si_code); |
3d2a71a5 | 696 | preempt_conditional_cli(regs); |
42181186 | 697 | debug_stack_usage_dec(); |
1da177e4 | 698 | |
6ba3c97a | 699 | exit: |
95927475 | 700 | ist_exit(regs, prev_state); |
1da177e4 | 701 | } |
9326638c | 702 | NOKPROBE_SYMBOL(do_debug); |
1da177e4 LT |
703 | |
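
do_debug() above works on a virtualized copy of DR6. A standalone sketch of the same bit tests follows; the DR_* and DR6_RESERVED constants are redefined locally with the values assumed to be in asm/debugreg.h.

```c
#include <stdio.h>

/* Assumed DR6 bit layout (normally from asm/debugreg.h): */
#define DR_TRAP_BITS	0x000fUL	/* B0-B3: which hardware breakpoint fired */
#define DR_STEP		0x4000UL	/* BS: single-step trap */
#define DR6_RESERVED	0xffff0ff0UL	/* reserved bits, read back as 1 */

int main(void)
{
	unsigned long dr6 = 0xffff4ff0UL;	/* example raw DR6 after a single step */

	dr6 &= ~DR6_RESERVED;			/* same filtering as do_debug() */

	if (!dr6)
		printf("no cause recorded: likely an icebp/int $1 from user space\n");
	if (dr6 & DR_STEP)
		printf("single-step (TF) trap\n");
	if (dr6 & DR_TRAP_BITS)
		printf("breakpoint bits: 0x%lx\n", dr6 & DR_TRAP_BITS);
	return 0;
}
```
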
704 | /* | |
705 | * Note that we play around with the 'TS' bit in an attempt to get | |
706 | * the correct behaviour even in the presence of the asynchronous | |
707 | * IRQ13 behaviour | |
708 | */ | |
5e1b05be | 709 | static void math_error(struct pt_regs *regs, int error_code, int trapnr) |
1da177e4 | 710 | { |
e2e75c91 | 711 | struct task_struct *task = current; |
1da177e4 | 712 | siginfo_t info; |
9b6dba9e | 713 | unsigned short err; |
c9408265 KC |
714 | char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" : |
715 | "simd exception"; | |
e2e75c91 BG |
716 | |
717 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) | |
718 | return; | |
719 | conditional_sti(regs); | |
720 | ||
f39b6f0e | 721 | if (!user_mode(regs)) |
e2e75c91 BG |
722 | { |
723 | if (!fixup_exception(regs)) { | |
724 | task->thread.error_code = error_code; | |
51e7dc70 | 725 | task->thread.trap_nr = trapnr; |
e2e75c91 BG |
726 | die(str, regs, error_code); |
727 | } | |
728 | return; | |
729 | } | |
1da177e4 LT |
730 | |
731 | /* | |
732 | * Save the info for the exception handler and clear the error. | |
733 | */ | |
08a744c6 | 734 | unlazy_fpu(task); |
51e7dc70 | 735 | task->thread.trap_nr = trapnr; |
9b6dba9e | 736 | task->thread.error_code = error_code; |
1da177e4 LT |
737 | info.si_signo = SIGFPE; |
738 | info.si_errno = 0; | |
b02ef20a | 739 | info.si_addr = (void __user *)uprobe_get_trap_addr(regs); |
c9408265 | 740 | if (trapnr == X86_TRAP_MF) { |
9b6dba9e BG |
741 | unsigned short cwd, swd; |
742 | /* | |
743 | * (~cwd & swd) will mask out exceptions that are not set to unmasked | |
744 | * status. 0x3f is the exception bits in these regs, 0x200 is the | |
745 | * C1 reg you need in case of a stack fault, 0x040 is the stack | |
746 | * fault bit. We should only be taking one exception at a time, | |
747 | * so if this combination doesn't produce any single exception, | |
748 | * then we have a bad program that isn't synchronizing its FPU usage | |
749 | * and it will suffer the consequences since we won't be able to | |
750 | * fully reproduce the context of the exception | |
751 | */ | |
752 | cwd = get_fpu_cwd(task); | |
753 | swd = get_fpu_swd(task); | |
adf77bac | 754 | |
9b6dba9e BG |
755 | err = swd & ~cwd; |
756 | } else { | |
757 | /* | |
758 | * The SIMD FPU exceptions are handled a little differently, as there | |
759 | * is only a single status/control register. Thus, to determine which | |
760 | * unmasked exception was caught we must mask the exception mask bits | |
761 | * at 0x1f80, and then use these to mask the exception bits at 0x3f. | |
762 | */ | |
763 | unsigned short mxcsr = get_fpu_mxcsr(task); | |
764 | err = ~(mxcsr >> 7) & mxcsr; | |
765 | } | |
adf77bac PA |
766 | |
767 | if (err & 0x001) { /* Invalid op */ | |
b5964405 IM |
768 | /* |
769 | * swd & 0x240 == 0x040: Stack Underflow | |
770 | * swd & 0x240 == 0x240: Stack Overflow | |
771 | * User must clear the SF bit (0x40) if set | |
772 | */ | |
773 | info.si_code = FPE_FLTINV; | |
adf77bac | 774 | } else if (err & 0x004) { /* Divide by Zero */ |
b5964405 | 775 | info.si_code = FPE_FLTDIV; |
adf77bac | 776 | } else if (err & 0x008) { /* Overflow */ |
b5964405 | 777 | info.si_code = FPE_FLTOVF; |
adf77bac PA |
778 | } else if (err & 0x012) { /* Denormal, Underflow */ |
779 | info.si_code = FPE_FLTUND; | |
780 | } else if (err & 0x020) { /* Precision */ | |
b5964405 | 781 | info.si_code = FPE_FLTRES; |
adf77bac | 782 | } else { |
bd8b96df | 783 | /* |
c9408265 KC |
784 | * If we're using IRQ 13, or supposedly even some trap |
785 | * X86_TRAP_MF implementations, it's possible | |
786 | * we get a spurious trap, which is not an error. | |
bd8b96df | 787 | */ |
c9408265 | 788 | return; |
1da177e4 LT |
789 | } |
790 | force_sig_info(SIGFPE, &info, task); | |
791 | } | |
792 | ||
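
The cwd/swd and mxcsr masking in math_error() can be exercised on its own; a minimal userspace sketch of the same computation follows. The bit checks mirror the if/else chain above, and the example control/status words are made up for illustration.

```c
#include <stdio.h>

/* Same unmasked-exception extraction as math_error():
 *   x87: err = swd & ~cwd            (status bits not masked by the control word)
 *   SSE: err = ~(mxcsr >> 7) & mxcsr (mask bits live at 0x1f80)
 */
static const char *fpe_reason(unsigned short err)
{
	if (err & 0x001) return "FPE_FLTINV (invalid op / stack fault)";
	if (err & 0x004) return "FPE_FLTDIV (divide by zero)";
	if (err & 0x008) return "FPE_FLTOVF (overflow)";
	if (err & 0x012) return "FPE_FLTUND (denormal / underflow)";
	if (err & 0x020) return "FPE_FLTRES (precision)";
	return "spurious trap, no signal sent";
}

int main(void)
{
	unsigned short cwd = 0x037b;	/* example: divide-by-zero unmasked */
	unsigned short swd = 0x0004;	/* example: ZE (divide-by-zero) pending */
	unsigned short err = swd & ~cwd;

	printf("x87 err=%#x -> %s\n", err, fpe_reason(err));
	return 0;
}
```
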
e407d620 | 793 | dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) |
1da177e4 | 794 | { |
6c1e0256 FW |
795 | enum ctx_state prev_state; |
796 | ||
797 | prev_state = exception_enter(); | |
c9408265 | 798 | math_error(regs, error_code, X86_TRAP_MF); |
6c1e0256 | 799 | exception_exit(prev_state); |
1da177e4 LT |
800 | } |
801 | ||
e407d620 AH |
802 | dotraplinkage void |
803 | do_simd_coprocessor_error(struct pt_regs *regs, long error_code) | |
1da177e4 | 804 | { |
6c1e0256 FW |
805 | enum ctx_state prev_state; |
806 | ||
807 | prev_state = exception_enter(); | |
c9408265 | 808 | math_error(regs, error_code, X86_TRAP_XF); |
6c1e0256 | 809 | exception_exit(prev_state); |
1da177e4 LT |
810 | } |
811 | ||
e407d620 AH |
812 | dotraplinkage void |
813 | do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) | |
1da177e4 | 814 | { |
cf81978d | 815 | conditional_sti(regs); |
1da177e4 LT |
816 | #if 0 |
817 | /* No need to warn about this any longer. */ | |
c767a54b | 818 | pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); |
1da177e4 LT |
819 | #endif |
820 | } | |
821 | ||
2605fc21 | 822 | asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void) |
1da177e4 | 823 | { |
1da177e4 | 824 | } |
4efc0670 | 825 | |
2605fc21 | 826 | asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void) |
081f75bb AH |
827 | { |
828 | } | |
829 | ||
1da177e4 | 830 | /* |
b5964405 | 831 | * 'math_state_restore()' saves the current math information in the |
1da177e4 LT |
832 | * old math state array, and gets the new ones from the current task |
833 | * | |
834 | * Careful.. There are problems with IBM-designed IRQ13 behaviour. | |
835 | * Don't touch unless you *really* know how it works. | |
836 | * | |
be98c2cd LT | 837 | * Must be called with kernel preemption disabled (e.g. with local |
838 | * interrupts disabled, as in the case of do_device_not_available). |
1da177e4 | 839 | */ |
be98c2cd | 840 | void math_state_restore(void) |
1da177e4 | 841 | { |
f94edacf | 842 | struct task_struct *tsk = current; |
1da177e4 | 843 | |
aa283f49 SS |
844 | if (!tsk_used_math(tsk)) { |
845 | local_irq_enable(); | |
846 | /* | |
847 | * does a slab alloc which can sleep | |
848 | */ | |
849 | if (init_fpu(tsk)) { | |
850 | /* | |
851 | * ran out of memory! | |
852 | */ | |
853 | do_group_exit(SIGKILL); | |
854 | return; | |
855 | } | |
856 | local_irq_disable(); | |
857 | } | |
858 | ||
7575637a ON |
859 | /* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */ |
860 | kernel_fpu_disable(); | |
f94edacf | 861 | __thread_fpu_begin(tsk); |
80ab6f1e | 862 | if (unlikely(restore_fpu_checking(tsk))) { |
b85e67d1 | 863 | fpu_reset_state(tsk); |
38cad57b | 864 | force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk); |
7575637a ON |
865 | } else { |
866 | tsk->thread.fpu_counter++; | |
80ab6f1e | 867 | } |
7575637a | 868 | kernel_fpu_enable(); |
1da177e4 | 869 | } |
5992b6da | 870 | EXPORT_SYMBOL_GPL(math_state_restore); |
1da177e4 | 871 | |
9326638c | 872 | dotraplinkage void |
aa78bcfa | 873 | do_device_not_available(struct pt_regs *regs, long error_code) |
7643e9b9 | 874 | { |
6c1e0256 FW |
875 | enum ctx_state prev_state; |
876 | ||
877 | prev_state = exception_enter(); | |
5d2bd700 | 878 | BUG_ON(use_eager_fpu()); |
304bceda | 879 | |
a334fe43 | 880 | #ifdef CONFIG_MATH_EMULATION |
7643e9b9 | 881 | if (read_cr0() & X86_CR0_EM) { |
d315760f TH |
882 | struct math_emu_info info = { }; |
883 | ||
7643e9b9 | 884 | conditional_sti(regs); |
d315760f | 885 | |
aa78bcfa | 886 | info.regs = regs; |
d315760f | 887 | math_emulate(&info); |
6c1e0256 | 888 | exception_exit(prev_state); |
a334fe43 | 889 | return; |
7643e9b9 | 890 | } |
a334fe43 BG |
891 | #endif |
892 | math_state_restore(); /* interrupts still off */ | |
893 | #ifdef CONFIG_X86_32 | |
894 | conditional_sti(regs); | |
081f75bb | 895 | #endif |
6c1e0256 | 896 | exception_exit(prev_state); |
7643e9b9 | 897 | } |
9326638c | 898 | NOKPROBE_SYMBOL(do_device_not_available); |
7643e9b9 | 899 | |
081f75bb | 900 | #ifdef CONFIG_X86_32 |
e407d620 | 901 | dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) |
f8e0870f AH |
902 | { |
903 | siginfo_t info; | |
6c1e0256 | 904 | enum ctx_state prev_state; |
6ba3c97a | 905 | |
6c1e0256 | 906 | prev_state = exception_enter(); |
f8e0870f AH |
907 | local_irq_enable(); |
908 | ||
909 | info.si_signo = SIGILL; | |
910 | info.si_errno = 0; | |
911 | info.si_code = ILL_BADSTK; | |
fc6fcdfb | 912 | info.si_addr = NULL; |
c9408265 | 913 | if (notify_die(DIE_TRAP, "iret exception", regs, error_code, |
6ba3c97a FW |
914 | X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) { |
915 | do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, | |
916 | &info); | |
917 | } | |
6c1e0256 | 918 | exception_exit(prev_state); |
f8e0870f | 919 | } |
081f75bb | 920 | #endif |
f8e0870f | 921 | |
29c84391 JK |
922 | /* Set of traps needed for early debugging. */ |
923 | void __init early_trap_init(void) | |
924 | { | |
b4d83270 | 925 | /* |
5eca7453 WN |
926 | * Don't use IST to set DEBUG_STACK as it doesn't work until TSS |
927 | * is ready in cpu_init() <-- trap_init(). Before trap_init(), | |
928 | * CPU runs at ring 0 so it is impossible to hit an invalid | |
929 | * stack. Using the original stack works well enough at this | |
930 | * early stage. DEBUG_STACK will be equipped after cpu_init() in | |
b4d83270 | 931 | * trap_init(). |
5eca7453 WN |
932 | * |
933 | * We don't need to set trace_idt_table like set_intr_gate(), | |
934 | * since we don't have trace_debug and it will be reset to | |
935 | * 'debug' in trap_init() by set_intr_gate_ist(). | |
b4d83270 | 936 | */ |
5eca7453 | 937 | set_intr_gate_notrace(X86_TRAP_DB, debug); |
29c84391 | 938 | /* int3 can be called from all */ |
5eca7453 | 939 | set_system_intr_gate(X86_TRAP_BP, &int3); |
8170e6be | 940 | #ifdef CONFIG_X86_32 |
25c74b10 | 941 | set_intr_gate(X86_TRAP_PF, page_fault); |
8170e6be | 942 | #endif |
29c84391 JK |
943 | load_idt(&idt_descr); |
944 | } | |
945 | ||
8170e6be PA |
946 | void __init early_trap_pf_init(void) |
947 | { | |
948 | #ifdef CONFIG_X86_64 | |
25c74b10 | 949 | set_intr_gate(X86_TRAP_PF, page_fault); |
8170e6be PA |
950 | #endif |
951 | } | |
952 | ||
1da177e4 LT |
953 | void __init trap_init(void) |
954 | { | |
dbeb2be2 RR |
955 | int i; |
956 | ||
1da177e4 | 957 | #ifdef CONFIG_EISA |
927222b1 | 958 | void __iomem *p = early_ioremap(0x0FFFD9, 4); |
b5964405 IM |
959 | |
960 | if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24)) | |
1da177e4 | 961 | EISA_bus = 1; |
927222b1 | 962 | early_iounmap(p, 4); |
1da177e4 LT |
963 | #endif |
964 | ||
25c74b10 | 965 | set_intr_gate(X86_TRAP_DE, divide_error); |
c9408265 | 966 | set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK); |
699d2937 | 967 | /* int4 can be called from all */ |
c9408265 | 968 | set_system_intr_gate(X86_TRAP_OF, &overflow); |
25c74b10 SA |
969 | set_intr_gate(X86_TRAP_BR, bounds); |
970 | set_intr_gate(X86_TRAP_UD, invalid_op); | |
971 | set_intr_gate(X86_TRAP_NM, device_not_available); | |
081f75bb | 972 | #ifdef CONFIG_X86_32 |
c9408265 | 973 | set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS); |
081f75bb | 974 | #else |
c9408265 | 975 | set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK); |
081f75bb | 976 | #endif |
25c74b10 SA |
977 | set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun); |
978 | set_intr_gate(X86_TRAP_TS, invalid_TSS); | |
979 | set_intr_gate(X86_TRAP_NP, segment_not_present); | |
6f442be2 | 980 | set_intr_gate(X86_TRAP_SS, stack_segment); |
25c74b10 SA |
981 | set_intr_gate(X86_TRAP_GP, general_protection); |
982 | set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug); | |
983 | set_intr_gate(X86_TRAP_MF, coprocessor_error); | |
984 | set_intr_gate(X86_TRAP_AC, alignment_check); | |
1da177e4 | 985 | #ifdef CONFIG_X86_MCE |
c9408265 | 986 | set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK); |
1da177e4 | 987 | #endif |
25c74b10 | 988 | set_intr_gate(X86_TRAP_XF, simd_coprocessor_error); |
1da177e4 | 989 | |
bb3f0b59 YL |
990 | /* Reserve all the builtin and the syscall vector: */ |
991 | for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) | |
992 | set_bit(i, used_vectors); | |
993 | ||
081f75bb AH |
994 | #ifdef CONFIG_IA32_EMULATION |
995 | set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall); | |
bb3f0b59 | 996 | set_bit(IA32_SYSCALL_VECTOR, used_vectors); |
081f75bb AH |
997 | #endif |
998 | ||
999 | #ifdef CONFIG_X86_32 | |
699d2937 | 1000 | set_system_trap_gate(SYSCALL_VECTOR, &system_call); |
dbeb2be2 | 1001 | set_bit(SYSCALL_VECTOR, used_vectors); |
081f75bb | 1002 | #endif |
bb3f0b59 | 1003 | |
4eefbe79 KC |
1004 | /* |
1005 | * Set the IDT descriptor to a fixed read-only location, so that the | |
1006 | * "sidt" instruction will not leak the location of the kernel, and | |
1007 | * to defend the IDT against arbitrary memory write vulnerabilities. | |
1008 | * It will be reloaded in cpu_init() */ | |
1009 | __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO); | |
1010 | idt_descr.address = fix_to_virt(FIX_RO_IDT); | |
1011 | ||
1da177e4 | 1012 | /* |
b5964405 | 1013 | * Should be a barrier for any external CPU state: |
1da177e4 LT |
1014 | */ |
1015 | cpu_init(); | |
1016 | ||
b4d83270 WN |
1017 | /* |
1018 | * X86_TRAP_DB and X86_TRAP_BP have been set | |
5eca7453 | 1019 | * in early_trap_init(). However, IST works only after |
b4d83270 WN |
1020 | * cpu_init() loads TSS. See comments in early_trap_init(). |
1021 | */ | |
1022 | set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK); | |
1023 | /* int3 can be called from all */ | |
1024 | set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK); | |
1025 | ||
428cf902 | 1026 | x86_init.irqs.trap_init(); |
228bdaa9 SR |
1027 | |
1028 | #ifdef CONFIG_X86_64 | |
629f4f9d | 1029 | memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16); |
c9408265 KC |
1030 | set_nmi_gate(X86_TRAP_DB, &debug); |
1031 | set_nmi_gate(X86_TRAP_BP, &int3); | |
228bdaa9 | 1032 | #endif |
1da177e4 | 1033 | } |