arch/x86/kernel/traps_32.c
1 /*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
4 *
5 * Pentium III FXSR, SSE support
6 * Gareth Hughes <gareth@valinux.com>, May 2000
7 */
8
9 /*
10 * 'Traps.c' handles hardware traps and faults after we have saved some
11 * state in 'asm.s'.
12 */
13 #include <linux/interrupt.h>
14 #include <linux/kallsyms.h>
15 #include <linux/spinlock.h>
16 #include <linux/highmem.h>
17 #include <linux/kprobes.h>
18 #include <linux/uaccess.h>
19 #include <linux/utsname.h>
20 #include <linux/kdebug.h>
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/ptrace.h>
24 #include <linux/string.h>
25 #include <linux/unwind.h>
26 #include <linux/delay.h>
27 #include <linux/errno.h>
28 #include <linux/kexec.h>
29 #include <linux/sched.h>
30 #include <linux/timer.h>
31 #include <linux/init.h>
32 #include <linux/bug.h>
33 #include <linux/nmi.h>
34 #include <linux/mm.h>
35
36 #ifdef CONFIG_EISA
37 #include <linux/ioport.h>
38 #include <linux/eisa.h>
39 #endif
40
41 #ifdef CONFIG_MCA
42 #include <linux/mca.h>
43 #endif
44
45 #if defined(CONFIG_EDAC)
46 #include <linux/edac.h>
47 #endif
48
49 #include <asm/processor-flags.h>
50 #include <asm/arch_hooks.h>
51 #include <asm/stacktrace.h>
52 #include <asm/processor.h>
53 #include <asm/debugreg.h>
54 #include <asm/atomic.h>
55 #include <asm/system.h>
56 #include <asm/unwind.h>
57 #include <asm/desc.h>
58 #include <asm/i387.h>
59 #include <asm/nmi.h>
60 #include <asm/smp.h>
61 #include <asm/io.h>
62 #include <asm/traps.h>
63
64 #include "mach_traps.h"
65 #include "cpu/mcheck/mce.h"
66
67 DECLARE_BITMAP(used_vectors, NR_VECTORS);
68 EXPORT_SYMBOL_GPL(used_vectors);
69
70 asmlinkage int system_call(void);
71
72 /* Do we ignore FPU interrupts? */
73 char ignore_fpu_irq;
74
75 /*
76 * The IDT has to be page-aligned to simplify the Pentium
77 * F0 0F bug workaround. We have a special link segment
78 * for this.
79 */
80 gate_desc idt_table[256]
81 __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
82
83 int panic_on_unrecovered_nmi;
84 int kstack_depth_to_print = 24;
85 static unsigned int code_bytes = 64;
86 static int ignore_nmis;
87 static int die_counter;
88
89 static inline void conditional_sti(struct pt_regs *regs)
90 {
91 if (regs->flags & X86_EFLAGS_IF)
92 local_irq_enable();
93 }
94
95 void printk_address(unsigned long address, int reliable)
96 {
97 #ifdef CONFIG_KALLSYMS
98 unsigned long offset = 0;
99 unsigned long symsize;
100 const char *symname;
101 char *modname;
102 char *delim = ":";
103 char namebuf[KSYM_NAME_LEN];
104 char reliab[4] = "";
105
106 symname = kallsyms_lookup(address, &symsize, &offset,
107 &modname, namebuf);
108 if (!symname) {
109 printk(" [<%08lx>]\n", address);
110 return;
111 }
112 if (!reliable)
113 strcpy(reliab, "? ");
114
115 if (!modname)
116 modname = delim = "";
117 printk(" [<%08lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
118 address, reliab, delim, modname, delim, symname, offset, symsize);
119 #else
120 printk(" [<%08lx>]\n", address);
121 #endif
122 }
123
124 static inline int valid_stack_ptr(struct thread_info *tinfo,
125 void *p, unsigned int size)
126 {
127 void *t = tinfo;
128 return p > t && p <= t + THREAD_SIZE - size;
129 }
130
131 /* The form of the top of the frame on the stack */
132 struct stack_frame {
133 struct stack_frame *next_frame;
134 unsigned long return_address;
135 };
136
137 static inline unsigned long
138 print_context_stack(struct thread_info *tinfo,
139 unsigned long *stack, unsigned long bp,
140 const struct stacktrace_ops *ops, void *data)
141 {
142 struct stack_frame *frame = (struct stack_frame *)bp;
143
144 while (valid_stack_ptr(tinfo, stack, sizeof(*stack))) {
145 unsigned long addr;
146
147 addr = *stack;
148 if (__kernel_text_address(addr)) {
149 if ((unsigned long) stack == bp + 4) {
150 ops->address(data, addr, 1);
151 frame = frame->next_frame;
152 bp = (unsigned long) frame;
153 } else {
154 ops->address(data, addr, bp == 0);
155 }
156 }
157 stack++;
158 }
159 return bp;
160 }
161
162 void dump_trace(struct task_struct *task, struct pt_regs *regs,
163 unsigned long *stack, unsigned long bp,
164 const struct stacktrace_ops *ops, void *data)
165 {
166 if (!task)
167 task = current;
168
169 if (!stack) {
170 unsigned long dummy;
171 stack = &dummy;
172 if (task != current)
173 stack = (unsigned long *)task->thread.sp;
174 }
175
176 #ifdef CONFIG_FRAME_POINTER
177 if (!bp) {
178 if (task == current) {
179 /* Grab bp right from our regs */
180 asm("movl %%ebp, %0" : "=r" (bp) :);
181 } else {
182 /* bp is the last reg pushed by switch_to */
183 bp = *(unsigned long *) task->thread.sp;
184 }
185 }
186 #endif
187
188 for (;;) {
189 struct thread_info *context;
190
191 context = (struct thread_info *)
192 ((unsigned long)stack & (~(THREAD_SIZE - 1)));
193 bp = print_context_stack(context, stack, bp, ops, data);
194 /*
195 * Should be after the line below, but somewhere
196 * in early boot the context comes out corrupted and we
197 * can't reference it:
198 */
199 if (ops->stack(data, "IRQ") < 0)
200 break;
201 stack = (unsigned long *)context->previous_esp;
202 if (!stack)
203 break;
204 touch_nmi_watchdog();
205 }
206 }
207 EXPORT_SYMBOL(dump_trace);
208
209 static void
210 print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
211 {
212 printk(data);
213 print_symbol(msg, symbol);
214 printk("\n");
215 }
216
217 static void print_trace_warning(void *data, char *msg)
218 {
219 printk("%s%s\n", (char *)data, msg);
220 }
221
222 static int print_trace_stack(void *data, char *name)
223 {
224 return 0;
225 }
226
227 /*
228 * Print one address/symbol entry per line.
229 */
230 static void print_trace_address(void *data, unsigned long addr, int reliable)
231 {
232 printk("%s [<%08lx>] ", (char *)data, addr);
233 if (!reliable)
234 printk("? ");
235 print_symbol("%s\n", addr);
236 touch_nmi_watchdog();
237 }
238
239 static const struct stacktrace_ops print_trace_ops = {
240 .warning = print_trace_warning,
241 .warning_symbol = print_trace_warning_symbol,
242 .stack = print_trace_stack,
243 .address = print_trace_address,
244 };
245
246 static void
247 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
248 unsigned long *stack, unsigned long bp, char *log_lvl)
249 {
250 dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
251 printk("%s =======================\n", log_lvl);
252 }
253
254 void show_trace(struct task_struct *task, struct pt_regs *regs,
255 unsigned long *stack, unsigned long bp)
256 {
257 show_trace_log_lvl(task, regs, stack, bp, "");
258 }
259
260 static void
261 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
262 unsigned long *sp, unsigned long bp, char *log_lvl)
263 {
264 unsigned long *stack;
265 int i;
266
267 if (sp == NULL) {
268 if (task)
269 sp = (unsigned long *)task->thread.sp;
270 else
271 sp = (unsigned long *)&sp;
272 }
273
274 stack = sp;
275 for (i = 0; i < kstack_depth_to_print; i++) {
276 if (kstack_end(stack))
277 break;
278 if (i && ((i % 8) == 0))
279 printk("\n%s ", log_lvl);
280 printk("%08lx ", *stack++);
281 }
282 printk("\n%sCall Trace:\n", log_lvl);
283
284 show_trace_log_lvl(task, regs, sp, bp, log_lvl);
285 }
286
287 void show_stack(struct task_struct *task, unsigned long *sp)
288 {
289 printk(" ");
290 show_stack_log_lvl(task, NULL, sp, 0, "");
291 }
292
293 /*
294 * The architecture-independent dump_stack generator
295 */
296 void dump_stack(void)
297 {
298 unsigned long bp = 0;
299 unsigned long stack;
300
301 #ifdef CONFIG_FRAME_POINTER
302 if (!bp)
303 asm("movl %%ebp, %0" : "=r" (bp):);
304 #endif
305
306 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
307 current->pid, current->comm, print_tainted(),
308 init_utsname()->release,
309 (int)strcspn(init_utsname()->version, " "),
310 init_utsname()->version);
311
312 show_trace(current, NULL, &stack, bp);
313 }
314
315 EXPORT_SYMBOL(dump_stack);
316
317 void show_registers(struct pt_regs *regs)
318 {
319 int i;
320
321 print_modules();
322 __show_registers(regs, 0);
323
324 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
325 TASK_COMM_LEN, current->comm, task_pid_nr(current),
326 current_thread_info(), current, task_thread_info(current));
327 /*
328 * When in-kernel, we also print out the stack and code at the
329 * time of the fault.
330 */
331 if (!user_mode_vm(regs)) {
332 unsigned int code_prologue = code_bytes * 43 / 64;
333 unsigned int code_len = code_bytes;
334 unsigned char c;
335 u8 *ip;
336
337 printk("\n" KERN_EMERG "Stack: ");
338 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
339
340 printk(KERN_EMERG "Code: ");
341
342 ip = (u8 *)regs->ip - code_prologue;
343 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
344 /* try starting at EIP */
345 ip = (u8 *)regs->ip;
346 code_len = code_len - code_prologue + 1;
347 }
348 for (i = 0; i < code_len; i++, ip++) {
349 if (ip < (u8 *)PAGE_OFFSET ||
350 probe_kernel_address(ip, c)) {
351 printk(" Bad EIP value.");
352 break;
353 }
354 if (ip == (u8 *)regs->ip)
355 printk("<%02x> ", c);
356 else
357 printk("%02x ", c);
358 }
359 }
360 printk("\n");
361 }
362
363 int is_valid_bugaddr(unsigned long ip)
364 {
365 unsigned short ud2;
366
367 if (ip < PAGE_OFFSET)
368 return 0;
369 if (probe_kernel_address((unsigned short *)ip, ud2))
370 return 0;
371
372 return ud2 == 0x0b0f;
373 }
374
375 static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
376 static int die_owner = -1;
377 static unsigned int die_nest_count;
378
379 unsigned __kprobes long oops_begin(void)
380 {
381 unsigned long flags;
382
383 oops_enter();
384
385 if (die_owner != raw_smp_processor_id()) {
386 console_verbose();
387 raw_local_irq_save(flags);
388 __raw_spin_lock(&die_lock);
389 die_owner = smp_processor_id();
390 die_nest_count = 0;
391 bust_spinlocks(1);
392 } else {
393 raw_local_irq_save(flags);
394 }
395 die_nest_count++;
396 return flags;
397 }
398
399 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
400 {
401 bust_spinlocks(0);
402 die_owner = -1;
403 add_taint(TAINT_DIE);
404 __raw_spin_unlock(&die_lock);
405 raw_local_irq_restore(flags);
406
407 if (!regs)
408 return;
409
410 if (kexec_should_crash(current))
411 crash_kexec(regs);
412
413 if (in_interrupt())
414 panic("Fatal exception in interrupt");
415
416 if (panic_on_oops)
417 panic("Fatal exception");
418
419 oops_exit();
420 do_exit(signr);
421 }
422
423 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
424 {
425 unsigned short ss;
426 unsigned long sp;
427
428 printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
429 #ifdef CONFIG_PREEMPT
430 printk("PREEMPT ");
431 #endif
432 #ifdef CONFIG_SMP
433 printk("SMP ");
434 #endif
435 #ifdef CONFIG_DEBUG_PAGEALLOC
436 printk("DEBUG_PAGEALLOC");
437 #endif
438 printk("\n");
439 if (notify_die(DIE_OOPS, str, regs, err,
440 current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
441 return 1;
442
443 show_registers(regs);
444 /* Executive summary in case the oops scrolled away */
445 sp = (unsigned long) (&regs->sp);
446 savesegment(ss, ss);
447 if (user_mode(regs)) {
448 sp = regs->sp;
449 ss = regs->ss & 0xffff;
450 }
451 printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
452 print_symbol("%s", regs->ip);
453 printk(" SS:ESP %04x:%08lx\n", ss, sp);
454 return 0;
455 }
456
457 /*
458 * This is gone through when something in the kernel has done something bad
459 * and is about to be terminated:
460 */
461 void die(const char *str, struct pt_regs *regs, long err)
462 {
463 unsigned long flags = oops_begin();
464
465 if (die_nest_count < 3) {
466 report_bug(regs->ip, regs);
467
468 if (__die(str, regs, err))
469 regs = NULL;
470 } else {
471 printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
472 }
473
474 oops_end(flags, regs, SIGSEGV);
475 }
476
477 static inline void
478 die_if_kernel(const char *str, struct pt_regs *regs, long err)
479 {
480 if (!user_mode_vm(regs))
481 die(str, regs, err);
482 }
483
484 static void __kprobes
485 do_trap(int trapnr, int signr, char *str, int vm86, struct pt_regs *regs,
486 long error_code, siginfo_t *info)
487 {
488 struct task_struct *tsk = current;
489
490 if (regs->flags & X86_VM_MASK) {
491 if (vm86)
492 goto vm86_trap;
493 goto trap_signal;
494 }
495
496 if (!user_mode(regs))
497 goto kernel_trap;
498
499 trap_signal:
500 /*
501 * We want error_code and trap_no set for userspace faults and
502 * kernelspace faults which result in die(), but not
503 * kernelspace faults which are fixed up. die() gives the
504 * process no chance to handle the signal and notice the
505 * kernel fault information, so that won't result in polluting
506 * the information about previously queued, but not yet
507 * delivered, faults. See also do_general_protection below.
508 */
509 tsk->thread.error_code = error_code;
510 tsk->thread.trap_no = trapnr;
511
512 if (info)
513 force_sig_info(signr, info, tsk);
514 else
515 force_sig(signr, tsk);
516 return;
517
518 kernel_trap:
519 if (!fixup_exception(regs)) {
520 tsk->thread.error_code = error_code;
521 tsk->thread.trap_no = trapnr;
522 die(str, regs, error_code);
523 }
524 return;
525
526 vm86_trap:
527 if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
528 error_code, trapnr))
529 goto trap_signal;
530 return;
531 }
532
533 #define DO_ERROR(trapnr, signr, str, name) \
534 void do_##name(struct pt_regs *regs, long error_code) \
535 { \
536 trace_hardirqs_fixup(); \
537 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
538 == NOTIFY_STOP) \
539 return; \
540 conditional_sti(regs); \
541 do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
542 }
543
544 #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \
545 void do_##name(struct pt_regs *regs, long error_code) \
546 { \
547 siginfo_t info; \
548 if (irq) \
549 local_irq_enable(); \
550 info.si_signo = signr; \
551 info.si_errno = 0; \
552 info.si_code = sicode; \
553 info.si_addr = (void __user *)siaddr; \
554 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
555 == NOTIFY_STOP) \
556 return; \
557 conditional_sti(regs); \
558 do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
559 }
560
561 #define DO_VM86_ERROR(trapnr, signr, str, name) \
562 void do_##name(struct pt_regs *regs, long error_code) \
563 { \
564 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
565 == NOTIFY_STOP) \
566 return; \
567 conditional_sti(regs); \
568 do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
569 }
570
571 #define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
572 void do_##name(struct pt_regs *regs, long error_code) \
573 { \
574 siginfo_t info; \
575 info.si_signo = signr; \
576 info.si_errno = 0; \
577 info.si_code = sicode; \
578 info.si_addr = (void __user *)siaddr; \
579 trace_hardirqs_fixup(); \
580 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
581 == NOTIFY_STOP) \
582 return; \
583 conditional_sti(regs); \
584 do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
585 }
586
587 DO_VM86_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
588 DO_VM86_ERROR(4, SIGSEGV, "overflow", overflow)
589 DO_VM86_ERROR(5, SIGSEGV, "bounds", bounds)
590 DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip, 0)
591 DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
592 DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
593 DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
594 DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
595 DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
596
597 void __kprobes
598 do_general_protection(struct pt_regs *regs, long error_code)
599 {
600 struct task_struct *tsk;
601 struct thread_struct *thread;
602 struct tss_struct *tss;
603 int cpu;
604
605 conditional_sti(regs);
606
607 cpu = get_cpu();
608 tss = &per_cpu(init_tss, cpu);
609 thread = &current->thread;
610
611 /*
612 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
613 * invalid offset set (the LAZY one) and the faulting thread has
614 * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
615 * and we set the offset field correctly. Then we let the CPU
616 * restart the faulting instruction.
617 */
618 if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
619 thread->io_bitmap_ptr) {
620 memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
621 thread->io_bitmap_max);
622 /*
623 * If the previously set map extended to higher ports
624 * than the current one, pad extra space with 0xff (no access).
625 */
626 if (thread->io_bitmap_max < tss->io_bitmap_max) {
627 memset((char *) tss->io_bitmap +
628 thread->io_bitmap_max, 0xff,
629 tss->io_bitmap_max - thread->io_bitmap_max);
630 }
631 tss->io_bitmap_max = thread->io_bitmap_max;
632 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
633 tss->io_bitmap_owner = thread;
634 put_cpu();
635
636 return;
637 }
638 put_cpu();
639
640 if (regs->flags & X86_VM_MASK)
641 goto gp_in_vm86;
642
643 tsk = current;
644 if (!user_mode(regs))
645 goto gp_in_kernel;
646
647 tsk->thread.error_code = error_code;
648 tsk->thread.trap_no = 13;
649
650 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
651 printk_ratelimit()) {
652 printk(KERN_INFO
653 "%s[%d] general protection ip:%lx sp:%lx error:%lx",
654 tsk->comm, task_pid_nr(tsk),
655 regs->ip, regs->sp, error_code);
656 print_vma_addr(" in ", regs->ip);
657 printk("\n");
658 }
659
660 force_sig(SIGSEGV, tsk);
661 return;
662
663 gp_in_vm86:
664 local_irq_enable();
665 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
666 return;
667
668 gp_in_kernel:
669 if (fixup_exception(regs))
670 return;
671
672 tsk->thread.error_code = error_code;
673 tsk->thread.trap_no = 13;
674 if (notify_die(DIE_GPF, "general protection fault", regs,
675 error_code, 13, SIGSEGV) == NOTIFY_STOP)
676 return;
677 die("general protection fault", regs, error_code);
678 }
679
680 static notrace __kprobes void
681 mem_parity_error(unsigned char reason, struct pt_regs *regs)
682 {
683 printk(KERN_EMERG
684 "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
685 reason, smp_processor_id());
686
687 printk(KERN_EMERG
688 "You have some hardware problem, likely on the PCI bus.\n");
689
690 #if defined(CONFIG_EDAC)
691 if (edac_handler_set()) {
692 edac_atomic_assert_error();
693 return;
694 }
695 #endif
696
697 if (panic_on_unrecovered_nmi)
698 panic("NMI: Not continuing");
699
700 printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
701
702 /* Clear and disable the memory parity error line. */
703 clear_mem_error(reason);
704 }
705
706 static notrace __kprobes void
707 io_check_error(unsigned char reason, struct pt_regs *regs)
708 {
709 unsigned long i;
710
711 printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
712 show_registers(regs);
713
714 /* Re-enable the IOCK line, wait for a few seconds */
715 reason = (reason & 0xf) | 8;
716 outb(reason, 0x61);
717
718 i = 2000;
719 while (--i)
720 udelay(1000);
721
722 reason &= ~8;
723 outb(reason, 0x61);
724 }
725
726 static notrace __kprobes void
727 unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
728 {
729 if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
730 return;
731 #ifdef CONFIG_MCA
732 /*
733 * Might actually be able to figure out what the guilty party
734 * is:
735 */
736 if (MCA_bus) {
737 mca_handle_nmi();
738 return;
739 }
740 #endif
741 printk(KERN_EMERG
742 "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
743 reason, smp_processor_id());
744
745 printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
746 if (panic_on_unrecovered_nmi)
747 panic("NMI: Not continuing");
748
749 printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
750 }
751
752 static DEFINE_SPINLOCK(nmi_print_lock);
753
754 void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
755 {
756 if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
757 return;
758
759 spin_lock(&nmi_print_lock);
760 /*
761 * We are in trouble anyway, let's at least try
762 * to get a message out:
763 */
764 bust_spinlocks(1);
765 printk(KERN_EMERG "%s", str);
766 printk(" on CPU%d, ip %08lx, registers:\n",
767 smp_processor_id(), regs->ip);
768 show_registers(regs);
769 if (do_panic)
770 panic("Non maskable interrupt");
771 console_silent();
772 spin_unlock(&nmi_print_lock);
773 bust_spinlocks(0);
774
775 /*
776 * If we are in the kernel we are probably nested up pretty badly
777 * and might as well get out now while we still can:
778 */
779 if (!user_mode_vm(regs)) {
780 current->thread.trap_no = 2;
781 crash_kexec(regs);
782 }
783
784 do_exit(SIGSEGV);
785 }
786
787 static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
788 {
789 unsigned char reason = 0;
790 int cpu;
791
792 cpu = smp_processor_id();
793
794 /* Only the BSP gets external NMIs from the system. */
795 if (!cpu)
796 reason = get_nmi_reason();
797
798 if (!(reason & 0xc0)) {
799 if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
800 == NOTIFY_STOP)
801 return;
802 #ifdef CONFIG_X86_LOCAL_APIC
803 /*
804 * Ok, so this is none of the documented NMI sources,
805 * so it must be the NMI watchdog.
806 */
807 if (nmi_watchdog_tick(regs, reason))
808 return;
809 if (!do_nmi_callback(regs, cpu))
810 unknown_nmi_error(reason, regs);
811 #else
812 unknown_nmi_error(reason, regs);
813 #endif
814
815 return;
816 }
817 if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
818 return;
819
820 /* AK: following checks seem to be broken on modern chipsets. FIXME */
821 if (reason & 0x80)
822 mem_parity_error(reason, regs);
823 if (reason & 0x40)
824 io_check_error(reason, regs);
825 /*
826 * Reassert NMI in case it became active meanwhile
827 * as it's edge-triggered:
828 */
829 reassert_nmi();
830 }
831
832 notrace __kprobes void do_nmi(struct pt_regs *regs, long error_code)
833 {
834 int cpu;
835
836 nmi_enter();
837
838 cpu = smp_processor_id();
839
840 ++nmi_count(cpu);
841
842 if (!ignore_nmis)
843 default_do_nmi(regs);
844
845 nmi_exit();
846 }
847
848 void stop_nmi(void)
849 {
850 acpi_nmi_disable();
851 ignore_nmis++;
852 }
853
854 void restart_nmi(void)
855 {
856 ignore_nmis--;
857 acpi_nmi_enable();
858 }
859
860 void __kprobes do_int3(struct pt_regs *regs, long error_code)
861 {
862 #ifdef CONFIG_KPROBES
863 trace_hardirqs_fixup();
864
865 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
866 == NOTIFY_STOP)
867 return;
868 /*
869 * This is an interrupt gate, because kprobes wants interrupts
870 * disabled. Normal trap handlers don't.
871 */
872 conditional_sti(regs);
873 #else
874 if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
875 == NOTIFY_STOP)
876 return;
877 #endif
878
879 do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
880 }
881
882 /*
883 * Our handling of the processor debug registers is non-trivial.
884 * We do not clear them on entry and exit from the kernel. Therefore
885 * it is possible to get a watchpoint trap here from inside the kernel.
886 * However, the code in ./ptrace.c has ensured that the user can
887 * only set watchpoints on userspace addresses. Therefore the in-kernel
888 * watchpoint trap can only occur in code which is reading/writing
889 * from user space. Such code must not hold kernel locks (since it
890 * can equally take a page fault), therefore it is safe to call
891 * force_sig_info even though that claims and releases locks.
892 *
893 * Code in ./signal.c ensures that the debug control register
894 * is restored before we deliver any signal, and therefore that
895 * user code runs with the correct debug control register even though
896 * we clear it here.
897 *
898 * Being careful here means that we don't have to be as careful in a
899 * lot of more complicated places (task switching can be a bit lazy
900 * about restoring all the debug state, and ptrace doesn't have to
901 * find every occurrence of the TF bit that could be saved away even
902 * by user code)
903 */
904 void __kprobes do_debug(struct pt_regs *regs, long error_code)
905 {
906 struct task_struct *tsk = current;
907 unsigned int condition;
908 int si_code;
909
910 trace_hardirqs_fixup();
911
912 get_debugreg(condition, 6);
913
914 /*
915 * The processor cleared BTF, so don't mark that we need it set.
916 */
917 clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
918 tsk->thread.debugctlmsr = 0;
919
920 if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
921 SIGTRAP) == NOTIFY_STOP)
922 return;
923 /* It's safe to allow irq's after DR6 has been saved */
924 if (regs->flags & X86_EFLAGS_IF)
925 local_irq_enable();
926
927 /* Mask out spurious debug traps due to lazy DR7 setting */
928 if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
929 if (!tsk->thread.debugreg7)
930 goto clear_dr7;
931 }
932
933 if (regs->flags & X86_VM_MASK)
934 goto debug_vm86;
935
936 /* Save debug status register where ptrace can see it */
937 tsk->thread.debugreg6 = condition;
938
939 /*
940 * Single-stepping through TF: make sure we ignore any events in
941 * kernel space (but re-enable TF when returning to user mode).
942 */
943 if (condition & DR_STEP) {
944 /*
945 * We already checked v86 mode above, so we can
946 * check for kernel mode by just checking the CPL
947 * of CS.
948 */
949 if (!user_mode(regs))
950 goto clear_TF_reenable;
951 }
952
953 si_code = get_si_code((unsigned long)condition);
954 /* Ok, finally something we can handle */
955 send_sigtrap(tsk, regs, error_code, si_code);
956
957 /*
958 * Disable additional traps. They'll be re-enabled when
959 * the signal is delivered.
960 */
961 clear_dr7:
962 set_debugreg(0, 7);
963 return;
964
965 debug_vm86:
966 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
967 return;
968
969 clear_TF_reenable:
970 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
971 regs->flags &= ~X86_EFLAGS_TF;
972 return;
973 }
974
975 /*
976 * Note that we play around with the 'TS' bit in an attempt to get
977 * the correct behaviour even in the presence of the asynchronous
978 * IRQ13 behaviour
979 */
980 void math_error(void __user *ip)
981 {
982 struct task_struct *task;
983 siginfo_t info;
984 unsigned short cwd, swd;
985
986 /*
987 * Save the info for the exception handler and clear the error.
988 */
989 task = current;
990 save_init_fpu(task);
991 task->thread.trap_no = 16;
992 task->thread.error_code = 0;
993 info.si_signo = SIGFPE;
994 info.si_errno = 0;
995 info.si_code = __SI_FAULT;
996 info.si_addr = ip;
997 /*
998 * (~cwd & swd) will mask out exceptions that are not set to unmasked
999 * status. 0x3f is the exception bits in these regs, 0x200 is the
1000 * C1 reg you need in case of a stack fault, 0x040 is the stack
1001 * fault bit. We should only be taking one exception at a time,
1002 * so if this combination doesn't produce any single exception,
1003 * then we have a bad program that isn't synchronizing its FPU usage
1004 * and it will suffer the consequences since we won't be able to
1005 * fully reproduce the context of the exception
1006 */
1007 cwd = get_fpu_cwd(task);
1008 swd = get_fpu_swd(task);
1009 switch (swd & ~cwd & 0x3f) {
1010 case 0x000: /* No unmasked exception */
1011 return;
1012 default: /* Multiple exceptions */
1013 break;
1014 case 0x001: /* Invalid Op */
1015 /*
1016 * swd & 0x240 == 0x040: Stack Underflow
1017 * swd & 0x240 == 0x240: Stack Overflow
1018 * User must clear the SF bit (0x40) if set
1019 */
1020 info.si_code = FPE_FLTINV;
1021 break;
1022 case 0x002: /* Denormalize */
1023 case 0x010: /* Underflow */
1024 info.si_code = FPE_FLTUND;
1025 break;
1026 case 0x004: /* Zero Divide */
1027 info.si_code = FPE_FLTDIV;
1028 break;
1029 case 0x008: /* Overflow */
1030 info.si_code = FPE_FLTOVF;
1031 break;
1032 case 0x020: /* Precision */
1033 info.si_code = FPE_FLTRES;
1034 break;
1035 }
1036 force_sig_info(SIGFPE, &info, task);
1037 }
1038
1039 void do_coprocessor_error(struct pt_regs *regs, long error_code)
1040 {
1041 conditional_sti(regs);
1042 ignore_fpu_irq = 1;
1043 math_error((void __user *)regs->ip);
1044 }
1045
1046 static void simd_math_error(void __user *ip)
1047 {
1048 struct task_struct *task;
1049 siginfo_t info;
1050 unsigned short mxcsr;
1051
1052 /*
1053 * Save the info for the exception handler and clear the error.
1054 */
1055 task = current;
1056 save_init_fpu(task);
1057 task->thread.trap_no = 19;
1058 task->thread.error_code = 0;
1059 info.si_signo = SIGFPE;
1060 info.si_errno = 0;
1061 info.si_code = __SI_FAULT;
1062 info.si_addr = ip;
1063 /*
1064 * The SIMD FPU exceptions are handled a little differently, as there
1065 * is only a single status/control register. Thus, to determine which
1066 * unmasked exception was caught we must mask the exception mask bits
1067 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
1068 */
1069 mxcsr = get_fpu_mxcsr(task);
1070 switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
1071 case 0x000:
1072 default:
1073 break;
1074 case 0x001: /* Invalid Op */
1075 info.si_code = FPE_FLTINV;
1076 break;
1077 case 0x002: /* Denormalize */
1078 case 0x010: /* Underflow */
1079 info.si_code = FPE_FLTUND;
1080 break;
1081 case 0x004: /* Zero Divide */
1082 info.si_code = FPE_FLTDIV;
1083 break;
1084 case 0x008: /* Overflow */
1085 info.si_code = FPE_FLTOVF;
1086 break;
1087 case 0x020: /* Precision */
1088 info.si_code = FPE_FLTRES;
1089 break;
1090 }
1091 force_sig_info(SIGFPE, &info, task);
1092 }
1093
1094 void do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
1095 {
1096 conditional_sti(regs);
1097
1098 if (cpu_has_xmm) {
1099 /* Handle SIMD FPU exceptions on PIII+ processors. */
1100 ignore_fpu_irq = 1;
1101 simd_math_error((void __user *)regs->ip);
1102 return;
1103 }
1104 /*
1105 * Handle strange cache flush from user space exception
1106 * in all other cases. This is undocumented behaviour.
1107 */
1108 if (regs->flags & X86_VM_MASK) {
1109 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
1110 return;
1111 }
1112 current->thread.trap_no = 19;
1113 current->thread.error_code = error_code;
1114 die_if_kernel("cache flush denied", regs, error_code);
1115 force_sig(SIGSEGV, current);
1116 }
1117
1118 void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
1119 {
1120 conditional_sti(regs);
1121 #if 0
1122 /* No need to warn about this any longer. */
1123 printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
1124 #endif
1125 }
1126
1127 unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
1128 {
1129 struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
1130 unsigned long base = (kesp - uesp) & -THREAD_SIZE;
1131 unsigned long new_kesp = kesp - base;
1132 unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
1133 __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
1134
1135 /* Set up base for espfix segment */
1136 desc &= 0x00f0ff0000000000ULL;
1137 desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
1138 ((((__u64)base) << 32) & 0xff00000000000000ULL) |
1139 ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
1140 (lim_pages & 0xffff);
1141 *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
1142
1143 return new_kesp;
1144 }
1145
1146 /*
1147 * 'math_state_restore()' saves the current math information in the
1148 * old math state array, and gets the new ones from the current task
1149 *
1150 * Careful. There are problems with IBM-designed IRQ13 behaviour.
1151 * Don't touch unless you *really* know how it works.
1152 *
1153 * Must be called with kernel preemption disabled (in this case,
1154 * local interrupts are disabled at the call-site in entry.S).
1155 */
1156 asmlinkage void math_state_restore(void)
1157 {
1158 struct thread_info *thread = current_thread_info();
1159 struct task_struct *tsk = thread->task;
1160
1161 if (!tsk_used_math(tsk)) {
1162 local_irq_enable();
1163 /*
1164 * does a slab alloc which can sleep
1165 */
1166 if (init_fpu(tsk)) {
1167 /*
1168 * ran out of memory!
1169 */
1170 do_group_exit(SIGKILL);
1171 return;
1172 }
1173 local_irq_disable();
1174 }
1175
1176 clts(); /* Allow maths ops (or we recurse) */
1177 restore_fpu(tsk);
1178 thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
1179 tsk->fpu_counter++;
1180 }
1181 EXPORT_SYMBOL_GPL(math_state_restore);
1182
1183 #ifndef CONFIG_MATH_EMULATION
1184
1185 asmlinkage void math_emulate(long arg)
1186 {
1187 printk(KERN_EMERG
1188 "math-emulation not enabled and no coprocessor found.\n");
1189 printk(KERN_EMERG "killing %s.\n", current->comm);
1190 force_sig(SIGFPE, current);
1191 schedule();
1192 }
1193
1194 #endif /* CONFIG_MATH_EMULATION */
1195
1196 void __kprobes do_device_not_available(struct pt_regs *regs, long error)
1197 {
1198 if (read_cr0() & X86_CR0_EM) {
1199 conditional_sti(regs);
1200 math_emulate(0);
1201 } else {
1202 math_state_restore(); /* interrupts still off */
1203 conditional_sti(regs);
1204 }
1205 }
1206
1207 #ifdef CONFIG_X86_MCE
1208 void __kprobes do_machine_check(struct pt_regs *regs, long error)
1209 {
1210 conditional_sti(regs);
1211 machine_check_vector(regs, error);
1212 }
1213 #endif
1214
1215 void do_iret_error(struct pt_regs *regs, long error_code)
1216 {
1217 siginfo_t info;
1218 local_irq_enable();
1219
1220 info.si_signo = SIGILL;
1221 info.si_errno = 0;
1222 info.si_code = ILL_BADSTK;
1223 info.si_addr = 0;
1224 if (notify_die(DIE_TRAP, "iret exception",
1225 regs, error_code, 32, SIGILL) == NOTIFY_STOP)
1226 return;
1227 do_trap(32, SIGILL, "iret exception", 0, regs, error_code, &info);
1228 }
1229
1230 void __init trap_init(void)
1231 {
1232 int i;
1233
1234 #ifdef CONFIG_EISA
1235 void __iomem *p = early_ioremap(0x0FFFD9, 4);
1236
1237 if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
1238 EISA_bus = 1;
1239 early_iounmap(p, 4);
1240 #endif
1241
1242 set_intr_gate(0, &divide_error);
1243 set_intr_gate(1, &debug);
1244 set_intr_gate(2, &nmi);
1245 set_system_intr_gate(3, &int3); /* int3 can be called from all */
1246 set_system_intr_gate(4, &overflow); /* int4 can be called from all */
1247 set_intr_gate(5, &bounds);
1248 set_intr_gate(6, &invalid_op);
1249 set_intr_gate(7, &device_not_available);
1250 set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
1251 set_intr_gate(9, &coprocessor_segment_overrun);
1252 set_intr_gate(10, &invalid_TSS);
1253 set_intr_gate(11, &segment_not_present);
1254 set_intr_gate(12, &stack_segment);
1255 set_intr_gate(13, &general_protection);
1256 set_intr_gate(14, &page_fault);
1257 set_intr_gate(15, &spurious_interrupt_bug);
1258 set_intr_gate(16, &coprocessor_error);
1259 set_intr_gate(17, &alignment_check);
1260 #ifdef CONFIG_X86_MCE
1261 set_intr_gate(18, &machine_check);
1262 #endif
1263 set_intr_gate(19, &simd_coprocessor_error);
1264
1265 if (cpu_has_fxsr) {
1266 printk(KERN_INFO "Enabling fast FPU save and restore... ");
1267 set_in_cr4(X86_CR4_OSFXSR);
1268 printk("done.\n");
1269 }
1270 if (cpu_has_xmm) {
1271 printk(KERN_INFO
1272 "Enabling unmasked SIMD FPU exception support... ");
1273 set_in_cr4(X86_CR4_OSXMMEXCPT);
1274 printk("done.\n");
1275 }
1276
1277 set_system_gate(SYSCALL_VECTOR, &system_call);
1278
1279 /* Reserve all the builtin and the syscall vector: */
1280 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
1281 set_bit(i, used_vectors);
1282
1283 set_bit(SYSCALL_VECTOR, used_vectors);
1284
1285 /*
1286 * Should be a barrier for any external CPU state:
1287 */
1288 cpu_init();
1289
1290 trap_init_hook();
1291 }
1292
1293 static int __init kstack_setup(char *s)
1294 {
1295 kstack_depth_to_print = simple_strtoul(s, NULL, 0);
1296
1297 return 1;
1298 }
1299 __setup("kstack=", kstack_setup);
1300
1301 static int __init code_bytes_setup(char *s)
1302 {
1303 code_bytes = simple_strtoul(s, NULL, 0);
1304 if (code_bytes > 8192)
1305 code_bytes = 8192;
1306
1307 return 1;
1308 }
1309 __setup("code_bytes=", code_bytes_setup);