/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/unwind.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/processor-flags.h>
#include <asm/arch_hooks.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/unwind.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/io.h>
#include <asm/traps.h>

#include "mach_traps.h"
#include "cpu/mcheck/mce.h"

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
gate_desc idt_table[256]
	__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };

static int ignore_nmis;

static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}
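
/*
 * Note on conditional_sti(): interrupts are re-enabled only if the
 * interrupted context had them enabled (IF set in the saved EFLAGS),
 * so a trap taken from an irqs-off region stays irqs-off.
 */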

static inline void
die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode_vm(regs))
		die(str, regs, err);
}

/*
 * Perform the lazy TSS I/O bitmap copy: if the TSS has the invalid
 * (LAZY) offset set and the faulting thread has a valid I/O bitmap
 * pointer, copy the thread's I/O bitmap into the TSS, set the offset
 * field to the real offset, and return 1.
 */
static int lazy_iobitmap_copy(void)
{
	struct thread_struct *thread;
	struct tss_struct *tss;
	int cpu;

	cpu = get_cpu();
	tss = &per_cpu(init_tss, cpu);
	thread = &current->thread;

	if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
	    thread->io_bitmap_ptr) {
		memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
		       thread->io_bitmap_max);
		/*
		 * If the previously set map was extending to higher ports
		 * than the current one, pad extra space with 0xff (no access).
		 */
		if (thread->io_bitmap_max < tss->io_bitmap_max) {
			memset((char *) tss->io_bitmap +
				thread->io_bitmap_max, 0xff,
				tss->io_bitmap_max - thread->io_bitmap_max);
		}
		tss->io_bitmap_max = thread->io_bitmap_max;
		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
		tss->io_bitmap_owner = thread;
		put_cpu();

		return 1;
	}
	put_cpu();

	return 0;
}
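
/*
 * Background, sketched from the surrounding code and the usual lazy I/O
 * bitmap protocol of this kernel generation: a thread acquires an I/O
 * bitmap via ioperm(), which populates thread->io_bitmap_ptr. Rather than
 * copying the full bitmap into the per-CPU TSS on every context switch,
 * the switch code just marks the TSS offset INVALID_IO_BITMAP_OFFSET_LAZY.
 * The thread's first I/O instruction then faults with a #GP, and
 * lazy_iobitmap_copy() above installs the real bitmap before
 * do_general_protection() restarts the instruction.
 */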

static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (regs->flags & X86_VM_MASK) {
		/*
		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < 6)
			goto vm86_trap;
		goto trap_signal;
	}

	if (!user_mode(regs))
		goto kernel_trap;

trap_signal:
	/*
	 * We want error_code and trap_no set for userspace faults and
	 * kernelspace faults which result in die(), but not for
	 * kernelspace faults which are fixed up. die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so it won't pollute the
	 * information about previously queued, but not yet
	 * delivered, faults. See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
	return;

kernel_trap:
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		die(str, regs, error_code);
	}
	return;

vm86_trap:
	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
			     error_code, trapnr))
		goto trap_signal;
	return;
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)	\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	siginfo_t info;							\
	info.si_signo = signr;						\
	info.si_errno = 0;						\
	info.si_code = sicode;						\
	info.si_addr = (void __user *)siaddr;				\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, &info);		\
}

DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR(4, SIGSEGV, "overflow", overflow)
DO_ERROR(5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)

dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	conditional_sti(regs);

	if (lazy_iobitmap_copy()) {
		/* restart the faulting instruction */
		return;
	}

	if (regs->flags & X86_VM_MASK)
		goto gp_in_vm86;

	tsk = current;
	if (!user_mode(regs))
		goto gp_in_kernel;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
			"%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	force_sig(SIGSEGV, tsk);
	return;

gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;

gp_in_kernel:
	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;
	if (notify_die(DIE_GPF, "general protection fault", regs,
		error_code, 13, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}

static notrace __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
	printk(KERN_EMERG
		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		reason, smp_processor_id());

	printk(KERN_EMERG
		"You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

	/* Clear and disable the memory parity error line. */
	clear_mem_error(reason);
}

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);

	i = 2000;
	while (--i)
		udelay(1000);

	reason &= ~8;
	outb(reason, 0x61);
}
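
/*
 * Background on the 0x61 pokes above, sketched here from standard PC/AT
 * behaviour: port 0x61 is System Control Port B. On a read, bit 7 reports
 * a memory parity error and bit 6 an I/O channel check (IOCHK); these are
 * the two bits default_do_nmi() below tests via 0x80 and 0x40. On a write,
 * setting bit 3 disables and clears the IOCHK latch while clearing it
 * re-arms it, which is why io_check_error() sets bit 3, waits roughly two
 * seconds, and clears it again.
 */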

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;
#ifdef CONFIG_MCA
	/*
	 * Might actually be able to figure out what the guilty party
	 * is:
	 */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	printk(KERN_EMERG
		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		reason, smp_processor_id());

	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

static DEFINE_SPINLOCK(nmi_print_lock);

void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	spin_lock(&nmi_print_lock);
	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out:
	 */
	bust_spinlocks(1);
	printk(KERN_EMERG "%s", str);
	printk(" on CPU%d, ip %08lx, registers:\n",
		smp_processor_id(), regs->ip);
	show_registers(regs);
	if (do_panic)
		panic("Non maskable interrupt");
	console_silent();
	spin_unlock(&nmi_print_lock);
	bust_spinlocks(0);

	/*
	 * If we are in the kernel we are probably nested up pretty bad
	 * and might as well get out now while we still can:
	 */
	if (!user_mode_vm(regs)) {
		current->thread.trap_no = 2;
		crash_kexec(regs);
	}

	do_exit(SIGSEGV);
}

static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
							== NOTIFY_STOP)
			return;
#ifdef CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog_tick(regs, reason))
			return;
		if (!do_nmi_callback(regs, cpu))
			unknown_nmi_error(reason, regs);
#else
		unknown_nmi_error(reason, regs);
#endif

		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered:
	 */
	reassert_nmi();
}

dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	int cpu;

	nmi_enter();

	cpu = smp_processor_id();

	++nmi_count(cpu);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}

void stop_nmi(void)
{
	acpi_nmi_disable();
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
	acpi_nmi_enable();
}

dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KPROBES
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
	conditional_sti(regs);
#else
	if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
#endif

	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	unsigned int condition;
	int si_code;

	get_debugreg(condition, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
	tsk->thread.debugctlmsr = 0;

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;
	/* It's safe to allow irqs after DR6 has been saved */
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7)
			goto clear_dr7;
	}

	if (regs->flags & X86_VM_MASK)
		goto debug_vm86;

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg6 = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		/*
		 * We already checked v86 mode above, so we can
		 * check for kernel mode by just checking the CPL
		 * of CS.
		 */
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	si_code = get_si_code((unsigned long)condition);
	/* Ok, finally something we can handle */
	send_sigtrap(tsk, regs, error_code, si_code);

	/*
	 * Disable additional traps. They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	set_debugreg(0, 7);
	return;

debug_vm86:
	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->flags &= ~X86_EFLAGS_TF;
	return;
}
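
/*
 * A note on the two "spurious" paths above: DR6 bits 0-3 (DR_TRAP0..3)
 * only say that *some* hardware breakpoint matched; if this task never
 * programmed DR7, the hit must be stale state left in the registers by
 * another task, so it is dropped and DR7 cleared. Likewise a DR_STEP
 * (TF) trap raised while in kernel mode is not reported: TF is cleared
 * from the saved flags for the kernel's benefit, and TIF_SINGLESTEP
 * records the stepping state for the return to user mode.
 */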

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *ip)
{
	struct task_struct *task;
	siginfo_t info;
	unsigned short cwd, swd;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception.
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
	case 0x000: /* No unmasked exception */
		return;
	default: /* Multiple exceptions */
		break;
	case 0x001: /* Invalid Op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
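
/*
 * Worked example for the switch above: a floating-point divide-by-zero
 * sets the ZE flag (bit 2, 0x04) in the status word. If the user left ZE
 * unmasked (bit 2 clear in the control word), swd & ~cwd & 0x3f == 0x004
 * and the process gets SIGFPE with si_code = FPE_FLTDIV. Control-word
 * mask bits and status-word flag bits share the same layout in bits 0-5:
 * IE, DE, ZE, OE, UE, PE.
 */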

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
	ignore_fpu_irq = 1;
	math_error((void __user *)regs->ip);
}

static void simd_math_error(void __user *ip)
{
	struct task_struct *task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
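
/*
 * The MXCSR expression mirrors the x87 case: exception flag bits live in
 * MXCSR bits 0-5 (0x3f) and the corresponding mask bits in bits 7-12
 * (0x1f80), so shifting the masks right by 7 lines them up with the flags
 * before selecting the flagged-and-unmasked exceptions.
 */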

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);

	if (cpu_has_xmm) {
		/* Handle SIMD FPU exceptions on PIII+ processors. */
		ignore_fpu_irq = 1;
		simd_math_error((void __user *)regs->ip);
		return;
	}
	/*
	 * Handle strange cache flush from user space exception
	 * in all other cases.  This is undocumented behaviour.
	 */
	if (regs->flags & X86_VM_MASK) {
		handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
		return;
	}
	current->thread.trap_no = 19;
	current->thread.error_code = error_code;
	die_if_kernel("cache flush denied", regs, error_code);
	force_sig(SIGSEGV, current);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
{
	struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
	unsigned long base = (kesp - uesp) & -THREAD_SIZE;
	unsigned long new_kesp = kesp - base;
	unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
	__u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];

	/* Set up base for espfix segment */
	desc &= 0x00f0ff0000000000ULL;
	desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
		((((__u64)base) << 32) & 0xff00000000000000ULL) |
		((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
		(lim_pages & 0xffff);
	*(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;

	return new_kesp;
}
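
/*
 * Decoding the magic constants above against the layout of an 8-byte GDT
 * descriptor: limit[15:0] lives in bits 0-15 and limit[19:16] in bits
 * 48-51; base[23:0] lives in bits 16-39 and base[31:24] in bits 56-63;
 * the access byte and flags nibble (bits 40-47 and 52-55) are the part
 * preserved by the 0x00f0ff0000000000 mask. The shifts simply scatter the
 * new base and the page-granular limit into those fields.
 */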

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	clts();				/* Allow maths ops (or we recurse) */
	restore_fpu(tsk);
	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
	tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
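
/*
 * Rough sketch of the lazy-FPU protocol this function completes: the
 * context switch code leaves CR0.TS set instead of eagerly reloading the
 * FPU state of the incoming task. The task's first FPU instruction then
 * raises #NM (vector 7, "device not available"), and
 * do_device_not_available() below calls math_state_restore(), which
 * clears TS via clts() and reloads the saved state, so subsequent FPU
 * instructions run at full speed.
 */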

#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
	printk(KERN_EMERG
		"math-emulation not enabled and no coprocessor found.\n");
	printk(KERN_EMERG "killing %s.\n", current->comm);
	force_sig(SIGFPE, current);
	schedule();
}

#endif /* CONFIG_MATH_EMULATION */

dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error)
{
	if (read_cr0() & X86_CR0_EM) {
		conditional_sti(regs);
		math_emulate(0);
	} else {
		math_state_restore(); /* interrupts still off */
		conditional_sti(regs);
	}
}

#ifdef CONFIG_X86_MCE
dotraplinkage void __kprobes do_machine_check(struct pt_regs *regs, long error)
{
	conditional_sti(regs);
	machine_check_vector(regs, error);
}
#endif

dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = 0;
	if (notify_die(DIE_TRAP, "iret exception",
			regs, error_code, 32, SIGILL) == NOTIFY_STOP)
		return;
	do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
}

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(0, &divide_error);
	set_intr_gate(1, &debug);
	set_intr_gate(2, &nmi);
	set_system_intr_gate(3, &int3); /* int3 can be called from all */
	set_system_intr_gate(4, &overflow); /* int4 can be called from all */
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
	set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate(12, &stack_segment);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate(18, &machine_check);
#endif
	set_intr_gate(19, &simd_coprocessor_error);
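
	/*
	 * Gate-type summary (standard 32-bit semantics, noted here for
	 * reference): set_intr_gate() installs a DPL-0 interrupt gate, so a
	 * user-mode "int $n" on these vectors faults instead of entering the
	 * handler; set_system_intr_gate() uses DPL 3 so int3/into work from
	 * user space; and vector 8 (double fault) gets a task gate so it runs
	 * on the known-good stack of a dedicated TSS.
	 */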

	if (cpu_has_fxsr) {
		printk(KERN_INFO "Enabling fast FPU save and restore... ");
		set_in_cr4(X86_CR4_OSFXSR);
		printk("done.\n");
	}
	if (cpu_has_xmm) {
		printk(KERN_INFO
			"Enabling unmasked SIMD FPU exception support... ");
		set_in_cr4(X86_CR4_OSXMMEXCPT);
		printk("done.\n");
	}

	set_system_gate(SYSCALL_VECTOR, &system_call);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

	set_bit(SYSCALL_VECTOR, used_vectors);

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	trap_init_hook();
}