/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>

#include <asm/mach_traps.h>

#ifdef CONFIG_X86_64
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/arch_hooks.h>
#include <asm/traps.h>

#include "cpu/mcheck/mce.h"

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
gate_desc idt_table[256]
        __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
#endif

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static int ignore_nmis;

static inline void conditional_sti(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
        inc_preempt_count();
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
        dec_preempt_count();
}
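
/*
 * The preempt_conditional_sti()/_cli() variants additionally raise the
 * preempt count, so a handler running on an IST stack cannot be
 * preempted (and rescheduled off its per-CPU stack) while it has
 * conditionally re-enabled interrupts.
 */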

#ifdef CONFIG_X86_32
static inline void
die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
        if (!user_mode_vm(regs))
                die(str, regs, err);
}

/*
 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
 * invalid offset set (the LAZY one) and the faulting thread has
 * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS,
 * we set the offset field correctly and return 1.
 */
static int lazy_iobitmap_copy(void)
{
        struct thread_struct *thread;
        struct tss_struct *tss;
        int cpu;

        cpu = get_cpu();
        tss = &per_cpu(init_tss, cpu);
        thread = &current->thread;

        if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
            thread->io_bitmap_ptr) {
                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
                       thread->io_bitmap_max);
                /*
                 * If the previously set map was extending to higher ports
                 * than the current one, pad extra space with 0xff (no access).
                 */
                if (thread->io_bitmap_max < tss->io_bitmap_max) {
                        memset((char *) tss->io_bitmap +
                                thread->io_bitmap_max, 0xff,
                                tss->io_bitmap_max - thread->io_bitmap_max);
                }
                tss->io_bitmap_max = thread->io_bitmap_max;
                tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
                tss->io_bitmap_owner = thread;
                put_cpu();

                return 1;
        }
        put_cpu();

        return 0;
}
#endif

static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
        long error_code, siginfo_t *info)
{
        struct task_struct *tsk = current;

#ifdef CONFIG_X86_32
        if (regs->flags & X86_VM_MASK) {
                /*
                 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
                 * On nmi (interrupt 2), do_trap should not be called.
                 */
                if (trapnr < 6)
                        goto vm86_trap;
                goto trap_signal;
        }
#endif

        if (!user_mode(regs))
                goto kernel_trap;

#ifdef CONFIG_X86_32
trap_signal:
#endif
        /*
         * We want error_code and trap_no set for userspace faults and
         * kernelspace faults which result in die(), but not
         * kernelspace faults which are fixed up. die() gives the
         * process no chance to handle the signal and notice the
         * kernel fault information, so that won't result in polluting
         * the information about previously queued, but not yet
         * delivered, faults. See also do_general_protection below.
         */
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

#ifdef CONFIG_X86_64
        if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
            printk_ratelimit()) {
                printk(KERN_INFO
                       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
                       tsk->comm, tsk->pid, str,
                       regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                printk("\n");
        }
#endif

        if (info)
                force_sig_info(signr, info, tsk);
        else
                force_sig(signr, tsk);
        return;

kernel_trap:
        if (!fixup_exception(regs)) {
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                die(str, regs, error_code);
        }
        return;

#ifdef CONFIG_X86_32
vm86_trap:
        if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
                        error_code, trapnr))
                goto trap_signal;
        return;
#endif
}

#define DO_ERROR(trapnr, signr, str, name)                              \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)     \
{                                                                       \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
            == NOTIFY_STOP)                                             \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, regs, error_code, NULL);            \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)         \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)     \
{                                                                       \
        siginfo_t info;                                                 \
        info.si_signo = signr;                                          \
        info.si_errno = 0;                                              \
        info.si_code = sicode;                                          \
        info.si_addr = (void __user *)siaddr;                           \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
            == NOTIFY_STOP)                                             \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, regs, error_code, &info);           \
}

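/*
 * For illustration: DO_ERROR(4, SIGSEGV, "overflow", overflow) below
 * expands to a do_overflow() that runs the DIE_TRAP notifier chain,
 * conditionally re-enables interrupts, and hands off to do_trap()
 * with no extra siginfo.
 */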
DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR(4, SIGSEGV, "overflow", overflow)
DO_ERROR(5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
#endif
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
                        12, SIGBUS) == NOTIFY_STOP)
                return;
        preempt_conditional_sti(regs);
        do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}

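/*
 * #DF runs on its own IST stack because the stack that triggered the
 * double fault may no longer be usable; there is nothing to fix up,
 * so all we can do is report and die.
 */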
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

        /* Return not checked because a double fault cannot be ignored */
        notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 8;

        /*
         * This is always a kernel trap and never fixable (and thus must
         * never return).
         */
        for (;;)
                die(str, regs, error_code);
}
#endif

dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk;

        conditional_sti(regs);

#ifdef CONFIG_X86_32
        if (lazy_iobitmap_copy()) {
                /* restart the faulting instruction */
                return;
        }

        if (regs->flags & X86_VM_MASK)
                goto gp_in_vm86;
#endif

        tsk = current;
        if (!user_mode(regs))
                goto gp_in_kernel;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 13;

        if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
            printk_ratelimit()) {
                printk(KERN_INFO
                       "%s[%d] general protection ip:%lx sp:%lx error:%lx",
                       tsk->comm, task_pid_nr(tsk),
                       regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                printk("\n");
        }

        force_sig(SIGSEGV, tsk);
        return;

#ifdef CONFIG_X86_32
gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;
#endif

gp_in_kernel:
        if (fixup_exception(regs))
                return;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 13;
        if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                return;
        die("general protection fault", regs, error_code);
}

static notrace __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
        printk(KERN_EMERG
                "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                        reason, smp_processor_id());

        printk(KERN_EMERG
                "You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
        if (edac_handler_set()) {
                edac_atomic_assert_error();
                return;
        }
#endif

        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

        /* Clear and disable the memory parity error line. */
        reason = (reason & 0xf) | 4;
        outb(reason, 0x61);
}

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
        unsigned long i;

        printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line and wait about 2 seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);

        i = 2000;
        while (--i)
                udelay(1000);

        reason &= ~8;
        outb(reason, 0x61);
}
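
/*
 * On PC-compatible hardware, port 0x61 doubles as the NMI status and
 * control register: reading bit 7 (0x80) reports a memory parity/SERR
 * NMI and bit 6 (0x40) an I/O check (IOCHK) NMI, while writing bits 2
 * and 3 masks and clears those sources. That is what the two handlers
 * above are toggling.
 */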

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
        if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
                        NOTIFY_STOP)
                return;
#ifdef CONFIG_MCA
        /*
         * Might actually be able to figure out what the guilty party
         * is:
         */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        printk(KERN_EMERG
                "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                        reason, smp_processor_id());

        printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;
        int cpu;

        cpu = smp_processor_id();

        /* Only the BSP gets external NMIs from the system. */
        if (!cpu)
                reason = get_nmi_reason();

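        /*
         * Bits 7 and 6 of the NMI reason byte flag memory parity and
         * I/O check errors respectively; if neither is set, the NMI
         * came from somewhere else (watchdog, IPI, or unknown).
         */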
        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                                                                == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog_tick(regs, reason))
                        return;
                if (!do_nmi_callback(regs, cpu))
                        unknown_nmi_error(reason, regs);
#else
                unknown_nmi_error(reason, regs);
#endif

                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;

        /* AK: following checks seem to be broken on modern chipsets. FIXME */
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
#ifdef CONFIG_X86_32
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered:
         */
        reassert_nmi();
#endif
}

dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
        nmi_enter();

        inc_irq_stat(__nmi_count);

        if (!ignore_nmis)
                default_do_nmi(regs);

        nmi_exit();
}

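/*
 * stop_nmi()/restart_nmi() let a caller temporarily ignore NMIs (and
 * quiesce the ACPI NMI source) without unhooking the handler itself;
 * do_nmi() keeps running but becomes a no-op while ignore_nmis is set.
 */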
void stop_nmi(void)
{
        acpi_nmi_disable();
        ignore_nmis++;
}

void restart_nmi(void)
{
        ignore_nmis--;
        acpi_nmi_enable();
}

/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KPROBES
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
#else
        if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
#endif
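
        /*
         * With kprobes configured in, DIE_INT3 gives the kprobes die
         * notifier first claim on breakpoints it planted, before the
         * trap is treated as an ordinary SIGTRAP for the task.
         */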

        preempt_conditional_sti(regs);
        do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = eregs;
        /* Did we already sync? */
        if (eregs == (struct pt_regs *)eregs->sp)
                ;
        /* Exception from user space */
        else if (user_mode(eregs))
                regs = task_pt_regs(current);
        /*
         * Exception from kernel and interrupts are enabled. Move to
         * kernel process stack.
         */
        else if (eregs->flags & X86_EFLAGS_IF)
                regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
        if (eregs != regs)
                *regs = *eregs;
        return regs;
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk = current;
        unsigned long condition;
        int si_code;

        get_debugreg(condition, 6);

        /*
         * The processor cleared BTF, so don't mark that we need it set.
         */
        clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
        tsk->thread.debugctlmsr = 0;

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                                SIGTRAP) == NOTIFY_STOP)
                return;

        /* It's safe to allow irq's after DR6 has been saved */
        preempt_conditional_sti(regs);

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg7)
                        goto clear_dr7;
        }

#ifdef CONFIG_X86_32
        if (regs->flags & X86_VM_MASK)
                goto debug_vm86;
#endif

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg6 = condition;

        /*
         * Single-stepping through TF: make sure we ignore any events in
         * kernel space (but re-enable TF when returning to user mode).
         */
        if (condition & DR_STEP) {
                if (!user_mode(regs))
                        goto clear_TF_reenable;
        }

        si_code = get_si_code(condition);
        /* Ok, finally something we can handle */
        send_sigtrap(tsk, regs, error_code, si_code);

        /*
         * Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        set_debugreg(0, 7);
        preempt_conditional_cli(regs);
        return;

#ifdef CONFIG_X86_32
debug_vm86:
        /* re-enable preemption: handle_vm86_trap() might sleep */
        dec_preempt_count();
        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
        conditional_cli(regs);
        return;
#endif

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->flags &= ~X86_EFLAGS_TF;
        preempt_conditional_cli(regs);
        return;
}

#ifdef CONFIG_X86_64
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
        if (fixup_exception(regs))
                return 1;

        notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
        /* Illegal floating point operation in the kernel */
        current->thread.trap_no = trapnr;
        die(str, regs, 0);
        return 0;
}
#endif

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *ip)
{
        struct task_struct *task;
        siginfo_t info;
        unsigned short cwd, swd, err;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = ip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status. 0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit. We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);

        err = swd & ~cwd;

        if (err & 0x001) {      /* Invalid op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
        } else if (err & 0x004) { /* Divide by Zero */
                info.si_code = FPE_FLTDIV;
        } else if (err & 0x008) { /* Overflow */
                info.si_code = FPE_FLTOVF;
        } else if (err & 0x012) { /* Denormal, Underflow */
                info.si_code = FPE_FLTUND;
        } else if (err & 0x020) { /* Precision */
                info.si_code = FPE_FLTRES;
        } else {
                /*
                 * If we're using IRQ 13, or supposedly even some trap 16
                 * implementations, it's possible we get a spurious trap...
                 */
                return;         /* Spurious trap, no error */
        }
        force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
        conditional_sti(regs);

#ifdef CONFIG_X86_32
        ignore_fpu_irq = 1;
#else
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel x87 math error", 16))
                return;
#endif

        math_error((void __user *)regs->ip);
}

static void simd_math_error(void __user *ip)
{
        struct task_struct *task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = ip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register. Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
        conditional_sti(regs);

#ifdef CONFIG_X86_32
        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_fpu_irq = 1;
                simd_math_error((void __user *)regs->ip);
                return;
        }
        /*
         * In all other cases, handle the strange "cache flush from user
         * space" exception. This is undocumented behaviour.
         */
        if (regs->flags & X86_VM_MASK) {
                handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
                return;
        }
        current->thread.trap_no = 19;
        current->thread.error_code = error_code;
        die_if_kernel("cache flush denied", regs, error_code);
        force_sig(SIGSEGV, current);
#else
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel simd math error", 19))
                return;
        simd_math_error((void __user *)regs->ip);
#endif
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
        conditional_sti(regs);
#if 0
        /* No need to warn about this any longer. */
        printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

#ifdef CONFIG_X86_32
unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
{
        struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
        unsigned long base = (kesp - uesp) & -THREAD_SIZE;
        unsigned long new_kesp = kesp - base;
        unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
        __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];

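        /*
         * A GDT descriptor packs base bits 0-23 into descriptor bits
         * 16-39 and base bits 24-31 into bits 56-63; the limit sits in
         * bits 0-15 plus bits 48-51. The shift-and-mask dance below
         * rewrites exactly those fields.
         */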
        /* Set up base for espfix segment */
        desc &= 0x00f0ff0000000000ULL;
        desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
                ((((__u64)base) << 32) & 0xff00000000000000ULL) |
                ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
                (lim_pages & 0xffff);
        *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;

        return new_kesp;
}
#else
asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}
#endif

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        if (!tsk_used_math(tsk)) {
                local_irq_enable();
                /*
                 * does a slab alloc which can sleep
                 */
                if (init_fpu(tsk)) {
                        /*
                         * ran out of memory!
                         */
                        do_group_exit(SIGKILL);
                        return;
                }
                local_irq_disable();
        }

        clts();                         /* Allow maths ops (or we recurse) */
#ifdef CONFIG_X86_32
        restore_fpu(tsk);
#else
        /*
         * Paranoid restore. send a SIGSEGV if we fail to restore the state.
         */
        if (unlikely(restore_fpu_checking(tsk))) {
                stts();
                force_sig(SIGSEGV, tsk);
                return;
        }
#endif
        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
        tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);

#ifndef CONFIG_MATH_EMULATION
void math_emulate(struct math_emu_info *info)
{
        printk(KERN_EMERG
                "math-emulation not enabled and no coprocessor found.\n");
        printk(KERN_EMERG "killing %s.\n", current->comm);
        force_sig(SIGFPE, current);
        schedule();
}
#endif /* CONFIG_MATH_EMULATION */

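/*
 * #NM (device not available) fires when an FPU/SSE instruction executes
 * while CR0.TS is set. Under lazy FPU switching that is the cue to
 * restore this task's math state, or to emulate the instruction when
 * CR0.EM says there is no FPU at all.
 */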
dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
        if (read_cr0() & X86_CR0_EM) {
                struct math_emu_info info = { };

                conditional_sti(regs);

                info.regs = regs;
                math_emulate(&info);
        } else {
                math_state_restore(); /* interrupts still off */
                conditional_sti(regs);
        }
#else
        math_state_restore();
#endif
}

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
        siginfo_t info;
        local_irq_enable();

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_BADSTK;
        info.si_addr = 0;
        if (notify_die(DIE_TRAP, "iret exception",
                        regs, error_code, 32, SIGILL) == NOTIFY_STOP)
                return;
        do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
}
#endif

void __init trap_init(void)
{
        int i;

#ifdef CONFIG_EISA
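        /*
         * EISA machines expose the ASCII signature "EISA" at physical
         * address 0x0FFFD9; the readl() below matches those four bytes
         * in little-endian order.
         */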
        void __iomem *p = early_ioremap(0x0FFFD9, 4);

        if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
                EISA_bus = 1;
        early_iounmap(p, 4);
#endif

        set_intr_gate(0, &divide_error);
        set_intr_gate_ist(1, &debug, DEBUG_STACK);
        set_intr_gate_ist(2, &nmi, NMI_STACK);
        /* int3 can be called from all */
        set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
        /* int4 can be called from all */
        set_system_intr_gate(4, &overflow);
        set_intr_gate(5, &bounds);
        set_intr_gate(6, &invalid_op);
        set_intr_gate(7, &device_not_available);
#ifdef CONFIG_X86_32
        set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
        set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
#endif
        set_intr_gate(9, &coprocessor_segment_overrun);
        set_intr_gate(10, &invalid_TSS);
        set_intr_gate(11, &segment_not_present);
        set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
        set_intr_gate(13, &general_protection);
        set_intr_gate(14, &page_fault);
        set_intr_gate(15, &spurious_interrupt_bug);
        set_intr_gate(16, &coprocessor_error);
        set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
        set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
        set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
        set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

#ifdef CONFIG_X86_32
        if (cpu_has_fxsr) {
                printk(KERN_INFO "Enabling fast FPU save and restore... ");
                set_in_cr4(X86_CR4_OSFXSR);
                printk("done.\n");
        }
        if (cpu_has_xmm) {
                printk(KERN_INFO
                        "Enabling unmasked SIMD FPU exception support... ");
                set_in_cr4(X86_CR4_OSXMMEXCPT);
                printk("done.\n");
        }

        set_system_trap_gate(SYSCALL_VECTOR, &system_call);
#endif

        /* Reserve all the builtin and the syscall vector: */
        for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
                set_bit(i, used_vectors);

#ifdef CONFIG_X86_64
        set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#else
        set_bit(SYSCALL_VECTOR, used_vectors);
#endif
        /*
         * Should be a barrier for any external CPU state:
         */
        cpu_init();

#ifdef CONFIG_X86_32
        trap_init_hook();
#endif
}