/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mce.h>

#include <asm/mach_traps.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.
 */
gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
#endif

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static inline void conditional_sti(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
        inc_preempt_count();
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
        dec_preempt_count();
}

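/*
 * Note on the preempt_conditional_sti()/preempt_conditional_cli() pair:
 * it brackets regions that may run on an IST stack. The preemption count
 * is raised before interrupts are (conditionally) re-enabled and dropped
 * only after they are disabled again, so the handler cannot be preempted
 * and rescheduled while it is still running on a per-CPU exception stack.
 */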
static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
        long error_code, siginfo_t *info)
{
        struct task_struct *tsk = current;

#ifdef CONFIG_X86_32
        if (regs->flags & X86_VM_MASK) {
                /*
                 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
                 * On nmi (interrupt 2), do_trap should not be called.
                 */
                if (trapnr < X86_TRAP_UD)
                        goto vm86_trap;
                goto trap_signal;
        }
#endif

        if (!user_mode(regs))
                goto kernel_trap;

#ifdef CONFIG_X86_32
trap_signal:
#endif
        /*
         * We want error_code and trap_nr set for userspace faults and
         * kernelspace faults which result in die(), but not
         * kernelspace faults which are fixed up. die() gives the
         * process no chance to handle the signal and notice the
         * kernel fault information, so that won't result in polluting
         * the information about previously queued, but not yet
         * delivered, faults. See also do_general_protection below.
         */
        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
        if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
            printk_ratelimit()) {
                pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
                        tsk->comm, tsk->pid, str,
                        regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                pr_cont("\n");
        }
#endif

        if (info)
                force_sig_info(signr, info, tsk);
        else
                force_sig(signr, tsk);
        return;

kernel_trap:
        if (!fixup_exception(regs)) {
                tsk->thread.error_code = error_code;
                tsk->thread.trap_nr = trapnr;
                die(str, regs, error_code);
        }
        return;

#ifdef CONFIG_X86_32
vm86_trap:
        if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
                             error_code, trapnr))
                goto trap_signal;
        return;
#endif
}

#define DO_ERROR(trapnr, signr, str, name)                              \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)     \
{                                                                       \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, regs, error_code, NULL);            \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)         \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)     \
{                                                                       \
        siginfo_t info;                                                 \
        info.si_signo = signr;                                          \
        info.si_errno = 0;                                              \
        info.si_code = sicode;                                          \
        info.si_addr = (void __user *)siaddr;                           \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, regs, error_code, &info);           \
}

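/*
 * For illustration, DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
 * below expands to a handler equivalent to:
 *
 *	dotraplinkage void do_overflow(struct pt_regs *regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "overflow", regs, error_code,
 *				X86_TRAP_OF, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		conditional_sti(regs);
 *		do_trap(X86_TRAP_OF, SIGSEGV, "overflow", regs,
 *			error_code, NULL);
 *	}
 */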
DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
                regs->ip)
DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
                regs->ip)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
                coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
#endif
DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
                BUS_ADRALN, 0)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
                        X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
                return;
        preempt_conditional_sti(regs);
        do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}

dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

        /* Return value not checked because a double fault cannot be ignored */
        notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_DF;

        /*
         * This is always a kernel trap and never fixable (and thus must
         * never return).
         */
        for (;;)
                die(str, regs, error_code);
}
#endif

dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk;

        conditional_sti(regs);

#ifdef CONFIG_X86_32
        if (regs->flags & X86_VM_MASK) {
                local_irq_enable();
                handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
                return;
        }
#endif

        tsk = current;
        if (!user_mode(regs)) {
                if (fixup_exception(regs))
                        return;

                tsk->thread.error_code = error_code;
                tsk->thread.trap_nr = X86_TRAP_GP;
                if (notify_die(DIE_GPF, "general protection fault", regs,
                               error_code, X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
                        die("general protection fault", regs, error_code);
                return;
        }

        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_GP;

        if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
                        printk_ratelimit()) {
                pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
                        tsk->comm, task_pid_nr(tsk),
                        regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                pr_cont("\n");
        }

        force_sig(SIGSEGV, tsk);
        return;
}

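/*
 * do_int3() is marked notrace: on x86, ftrace patches call sites by
 * temporarily writing an int3 byte, so tracing this handler while such a
 * patch is in flight could recurse through the very breakpoint being
 * handled (see the ftrace_int3_handler() check below).
 */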
/* May run on IST stack. */
dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
        /*
         * ftrace must be first, everything else may cause a recursive crash.
         * See note by declaration of modifying_ftrace_code in ftrace.c
         */
        if (unlikely(atomic_read(&modifying_ftrace_code)) &&
            ftrace_int3_handler(regs))
                return;
#endif
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
        if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                                SIGTRAP) == NOTIFY_STOP)
                return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

        if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                        SIGTRAP) == NOTIFY_STOP)
                return;

        /*
         * Let others (NMI) know that the debug stack is in use
         * as we may switch to the interrupt stack.
         */
        debug_stack_usage_inc();
        preempt_conditional_sti(regs);
        do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
        preempt_conditional_cli(regs);
        debug_stack_usage_dec();
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = eregs;
        /* Already synced */
        if (eregs == (struct pt_regs *)eregs->sp)
                ;
        /* Exception from user space */
        else if (user_mode(eregs))
                regs = task_pt_regs(current);
        /*
         * Exception from kernel and interrupts are enabled. Move to
         * kernel process stack.
         */
        else if (eregs->flags & X86_EFLAGS_IF)
                regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
        if (eregs != regs)
                *regs = *eregs;
        return regs;
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk = current;
        int user_icebp = 0;
        unsigned long dr6;
        int si_code;

        get_debugreg(dr6, 6);

        /* Filter out all the reserved bits which are preset to 1 */
        dr6 &= ~DR6_RESERVED;

        /*
         * If dr6 gives us no reason for the origin of this trap, then
         * it's very likely the result of an icebp/int01 trap.
         * User wants a sigtrap for that.
         */
        if (!dr6 && user_mode(regs))
                user_icebp = 1;

        /* Catch kmemcheck conditions first of all! */
        if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
                return;

        /* DR6 may or may not be cleared by the CPU */
        set_debugreg(0, 6);

        /*
         * The processor cleared BTF, so don't mark that we need it set.
         */
        clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

        /* Store the virtualized DR6 value */
        tsk->thread.debugreg6 = dr6;

        if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
                        SIGTRAP) == NOTIFY_STOP)
                return;

        /*
         * Let others (NMI) know that the debug stack is in use
         * as we may switch to the interrupt stack.
         */
        debug_stack_usage_inc();

        /* It's safe to allow irq's after DR6 has been saved */
        preempt_conditional_sti(regs);

        if (regs->flags & X86_VM_MASK) {
                handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
                                X86_TRAP_DB);
                preempt_conditional_cli(regs);
                debug_stack_usage_dec();
                return;
        }

        /*
         * Single-stepping through system calls: ignore any exceptions in
         * kernel space, but re-enable TF when returning to user mode.
         *
         * We already checked v86 mode above, so we can check for kernel mode
         * by just checking the CPL of CS.
         */
        if ((dr6 & DR_STEP) && !user_mode(regs)) {
                tsk->thread.debugreg6 &= ~DR_STEP;
                set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
                regs->flags &= ~X86_EFLAGS_TF;
        }
        si_code = get_si_code(tsk->thread.debugreg6);
        if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
                send_sigtrap(tsk, regs, error_code, si_code);
        preempt_conditional_cli(regs);
        debug_stack_usage_dec();

        return;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
        struct task_struct *task = current;
        siginfo_t info;
        unsigned short err;
        char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
                                                "simd exception";

        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
                return;
        conditional_sti(regs);

        if (!user_mode_vm(regs)) {
                if (!fixup_exception(regs)) {
                        task->thread.error_code = error_code;
                        task->thread.trap_nr = trapnr;
                        die(str, regs, error_code);
                }
                return;
        }

        /*
         * Save the info for the exception handler and clear the error.
         */
        save_init_fpu(task);
        task->thread.trap_nr = trapnr;
        task->thread.error_code = error_code;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = (void __user *)regs->ip;
        if (trapnr == X86_TRAP_MF) {
                unsigned short cwd, swd;
                /*
                 * (~cwd & swd) will mask out exceptions that are not set to unmasked
                 * status. 0x3f is the exception bits in these regs, 0x200 is the
                 * C1 reg you need in case of a stack fault, 0x040 is the stack
                 * fault bit. We should only be taking one exception at a time,
                 * so if this combination doesn't produce any single exception,
                 * then we have a bad program that isn't synchronizing its FPU usage
                 * and it will suffer the consequences since we won't be able to
                 * fully reproduce the context of the exception.
                 */
                cwd = get_fpu_cwd(task);
                swd = get_fpu_swd(task);

                err = swd & ~cwd;
        } else {
                /*
                 * The SIMD FPU exceptions are handled a little differently, as there
                 * is only a single status/control register. Thus, to determine which
                 * unmasked exception was caught we must mask the exception mask bits
                 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
                 */
                unsigned short mxcsr = get_fpu_mxcsr(task);
                err = ~(mxcsr >> 7) & mxcsr;
        }

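        /*
         * Illustrative example (not part of the original source): with only
         * the invalid-operation exception unmasked, cwd = 0x037e, and a
         * faulting op sets IE in swd (0x0001), so err = swd & ~cwd = 0x0001
         * and the decode below yields FPE_FLTINV. The SIMD path behaves
         * analogously: an unmasked zero-divide clears mask bit 0x0200 in
         * mxcsr and sets flag 0x0004, so err = ~(mxcsr >> 7) & mxcsr has
         * bit 0x004 set, giving FPE_FLTDIV.
         */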
        if (err & 0x001) {      /* Invalid op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
        } else if (err & 0x004) { /* Divide by Zero */
                info.si_code = FPE_FLTDIV;
        } else if (err & 0x008) { /* Overflow */
                info.si_code = FPE_FLTOVF;
        } else if (err & 0x012) { /* Denormal, Underflow */
                info.si_code = FPE_FLTUND;
        } else if (err & 0x020) { /* Precision */
                info.si_code = FPE_FLTRES;
        } else {
                /*
                 * If we're using IRQ 13, or supposedly even some trap
                 * X86_TRAP_MF implementations, it's possible
                 * we get a spurious trap, which is not an error.
                 */
                return;
        }
        force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
        ignore_fpu_irq = 1;
#endif

        math_error(regs, error_code, X86_TRAP_MF);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
        math_error(regs, error_code, X86_TRAP_XF);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
        conditional_sti(regs);
#if 0
        /* No need to warn about this any longer. */
        pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as in the case of do_device_not_available).
 */
void math_state_restore(void)
{
        struct task_struct *tsk = current;

        if (!tsk_used_math(tsk)) {
                local_irq_enable();
                /*
                 * init_fpu() does a slab alloc, which can sleep.
                 */
                if (init_fpu(tsk)) {
                        /*
                         * Ran out of memory!
                         */
                        do_group_exit(SIGKILL);
                        return;
                }
                local_irq_disable();
        }

        __thread_fpu_begin(tsk);
        /*
         * Paranoid restore: send a SIGSEGV if we fail to restore the state.
         */
        if (unlikely(restore_fpu_checking(tsk))) {
                __thread_fpu_end(tsk);
                force_sig(SIGSEGV, tsk);
                return;
        }

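        /*
         * fpu_counter tracks consecutive context switches in which this
         * task used the FPU; once it passes a threshold, the context-switch
         * code preloads the FPU state eagerly instead of faulting it in
         * lazily through another device-not-available trap.
         */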
        tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);

dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_MATH_EMULATION
        if (read_cr0() & X86_CR0_EM) {
                struct math_emu_info info = { };

                conditional_sti(regs);

                info.regs = regs;
                math_emulate(&info);
                return;
        }
#endif
        math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
        conditional_sti(regs);
#endif
}

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
        siginfo_t info;
        local_irq_enable();

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_BADSTK;
        info.si_addr = NULL;
        if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
                        X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
                return;
        do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
                &info);
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
        set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
        /* int3 can be called from all */
        set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
        set_intr_gate(X86_TRAP_PF, &page_fault);
        load_idt(&idt_descr);
}

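/*
 * A note on the gate helpers used below: the set_system_*() variants
 * install their gate with DPL 3, which permits a user-mode "int $n" to
 * reach the handler (needed for int3, into and the syscall vectors).
 * The plain set_intr_gate() variants use DPL 0, so a user-mode "int $n"
 * raises #GP instead; CPU-generated exceptions are delivered either way.
 * The *_ist() variants additionally run the handler on a dedicated IST
 * stack on x86-64.
 */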
void __init trap_init(void)
{
        int i;

#ifdef CONFIG_EISA
        void __iomem *p = early_ioremap(0x0FFFD9, 4);

        if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
                EISA_bus = 1;
        early_iounmap(p, 4);
#endif

        set_intr_gate(X86_TRAP_DE, &divide_error);
        set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
        /* int4 can be called from all */
        set_system_intr_gate(X86_TRAP_OF, &overflow);
        set_intr_gate(X86_TRAP_BR, &bounds);
        set_intr_gate(X86_TRAP_UD, &invalid_op);
        set_intr_gate(X86_TRAP_NM, &device_not_available);
#ifdef CONFIG_X86_32
        set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
        set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
        set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
        set_intr_gate(X86_TRAP_TS, &invalid_TSS);
        set_intr_gate(X86_TRAP_NP, &segment_not_present);
        set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
        set_intr_gate(X86_TRAP_GP, &general_protection);
        set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
        set_intr_gate(X86_TRAP_MF, &coprocessor_error);
        set_intr_gate(X86_TRAP_AC, &alignment_check);
#ifdef CONFIG_X86_MCE
        set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
        set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);

        /* Reserve all the builtin and the syscall vector: */
        for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
                set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
        set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
        set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
        set_system_trap_gate(SYSCALL_VECTOR, &system_call);
        set_bit(SYSCALL_VECTOR, used_vectors);
#endif

        /*
         * Should be a barrier for any external CPU state:
         */
        cpu_init();

        x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
        memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
        set_nmi_gate(X86_TRAP_DB, &debug);
        set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}