/*
 * linux/arch/i386/traps.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>

#include <asm/smp.h>
#include <asm/arch_hooks.h>
#include <asm/kdebug.h>

#include <linux/module.h>

#include "mach_traps.h"

asmlinkage int system_call(void);

struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
                { 0, 0 }, { 0, 0 } };

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq = 0;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

static int kstack_depth_to_print = 24;
struct notifier_block *i386die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

int register_die_notifier(struct notifier_block *nb)
{
        int err = 0;
        unsigned long flags;

        vmalloc_sync_all();
        spin_lock_irqsave(&die_notifier_lock, flags);
        err = notifier_chain_register(&i386die_chain, nb);
        spin_unlock_irqrestore(&die_notifier_lock, flags);
        return err;
}
EXPORT_SYMBOL(register_die_notifier);
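
/*
 * For reference: users such as kprobes and debuggers hook i386die_chain
 * with a notifier_block whose callback receives the DIE_* events raised
 * by the notify_die() calls below, and can return NOTIFY_STOP to
 * suppress the default handling. The vmalloc_sync_all() call makes sure
 * a handler living in vmalloc space (e.g. in a module) is mapped in
 * every page table before it can be invoked from fault context.
 */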

static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
        return p > (void *)tinfo &&
               p < (void *)tinfo + THREAD_SIZE - 3;
}

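/*
 * The THREAD_SIZE - 3 bound is conservative pointer hygiene: any
 * pointer that passes valid_stack_ptr() can be dereferenced as a full
 * 32-bit word without running past the end of the THREAD_SIZE-aligned
 * stack area.
 */
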
/*
 * Print CONFIG_STACK_BACKTRACE_COLS address/symbol entries per line.
 */
static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl,
                                        int printed)
{
        if (!printed)
                printk(log_lvl);

#if CONFIG_STACK_BACKTRACE_COLS == 1
        printk(" [<%08lx>] ", addr);
#else
        printk(" <%08lx> ", addr);
#endif
        print_symbol("%s", addr);

        printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS;

        if (printed)
                printk(" ");
        else
                printk("\n");

        return printed;
}

static inline unsigned long print_context_stack(struct thread_info *tinfo,
                                unsigned long *stack, unsigned long ebp,
                                char *log_lvl)
{
        unsigned long addr;
        int printed = 0; /* nr of entries already printed on current line */

#ifdef CONFIG_FRAME_POINTER
        while (valid_stack_ptr(tinfo, (void *)ebp)) {
                addr = *(unsigned long *)(ebp + 4);
                printed = print_addr_and_symbol(addr, log_lvl, printed);
                ebp = *(unsigned long *)ebp;
        }
#else
        while (valid_stack_ptr(tinfo, stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr))
                        printed = print_addr_and_symbol(addr, log_lvl, printed);
        }
#endif
        if (printed)
                printk("\n");

        return ebp;
}
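
/*
 * For reference, the CONFIG_FRAME_POINTER walk above relies on the
 * standard i386 frame layout: each function prologue pushes the
 * caller's %ebp and sets %ebp = %esp, so *(ebp) is the previous frame
 * pointer and *(ebp + 4) is the return address. Without frame pointers
 * we fall back to scanning every stack word for plausible kernel text
 * addresses.
 */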

static void show_trace_log_lvl(struct task_struct *task,
                               unsigned long *stack, char *log_lvl)
{
        unsigned long ebp;

        if (!task)
                task = current;

        if (task == current) {
                /* Grab ebp right from our regs */
                asm ("movl %%ebp, %0" : "=r" (ebp) : );
        } else {
                /* ebp is the last reg pushed by switch_to */
                ebp = *(unsigned long *) task->thread.esp;
        }

        while (1) {
                struct thread_info *context;
                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
                ebp = print_context_stack(context, stack, ebp, log_lvl);
                stack = (unsigned long*)context->previous_esp;
                if (!stack)
                        break;
                printk("%s =======================\n", log_lvl);
        }
}

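/*
 * The loop above follows nested stacks: when an interrupt was taken on
 * a separate stack, thread_info->previous_esp points back into the
 * stack it interrupted, and each hop is marked in the output with a
 * "=======================" separator line.
 */
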
void show_trace(struct task_struct *task, unsigned long * stack)
{
        show_trace_log_lvl(task, stack, "");
}

static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
                               char *log_lvl)
{
        unsigned long *stack;
        int i;

        if (esp == NULL) {
                if (task)
                        esp = (unsigned long*)task->thread.esp;
                else
                        esp = (unsigned long *)&esp;
        }

        stack = esp;
        printk(log_lvl);
        for(i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n%s       ", log_lvl);
                printk("%08lx ", *stack++);
        }
        printk("\n%sCall Trace:\n", log_lvl);
        show_trace_log_lvl(task, esp, log_lvl);
}

void show_stack(struct task_struct *task, unsigned long *esp)
{
        printk("       ");
        show_stack_log_lvl(task, esp, "");
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long stack;

        show_trace(current, &stack);
}

EXPORT_SYMBOL(dump_stack);

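/*
 * Taking the address of the local variable above is a deliberate trick:
 * it yields (approximately) the current stack pointer, giving
 * show_trace() a starting point inside the caller's own stack.
 */
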
void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = 1;
        unsigned long esp;
        unsigned short ss;

        esp = (unsigned long) (&regs->esp);
        savesegment(ss, ss);
        if (user_mode_vm(regs)) {
                in_kernel = 0;
                esp = regs->esp;
                ss = regs->xss & 0xffff;
        }
        print_modules();
        printk(KERN_EMERG "CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\n"
                        "EFLAGS: %08lx   (%s %.*s) \n",
                smp_processor_id(), 0xffff & regs->xcs, regs->eip,
                print_tainted(), regs->eflags, system_utsname.release,
                (int)strcspn(system_utsname.version, " "),
                system_utsname.version);
        print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
        printk(KERN_EMERG "eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk(KERN_EMERG "esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
                regs->esi, regs->edi, regs->ebp, esp);
        printk(KERN_EMERG "ds: %04x   es: %04x   ss: %04x\n",
                regs->xds & 0xffff, regs->xes & 0xffff, ss);
        printk(KERN_EMERG "Process %s (pid: %d, threadinfo=%p task=%p)",
                current->comm, current->pid, current_thread_info(), current);
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                u8 __user *eip;

                printk("\n" KERN_EMERG "Stack: ");
                show_stack_log_lvl(NULL, (unsigned long *)esp, KERN_EMERG);

                printk(KERN_EMERG "Code: ");

                eip = (u8 __user *)regs->eip - 43;
                for (i = 0; i < 64; i++, eip++) {
                        unsigned char c;

                        if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
                                printk(" Bad EIP value.");
                                break;
                        }
                        if (eip == (u8 __user *)regs->eip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}
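
/*
 * The "Code:" section above dumps 64 bytes of the instruction stream
 * starting 43 bytes before the faulting EIP, with the byte at the fault
 * address bracketed as <xx> so it can be picked out when the dump is
 * disassembled by hand.
 */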

static void handle_BUG(struct pt_regs *regs)
{
        unsigned short ud2;
        unsigned short line;
        char *file;
        char c;
        unsigned long eip;

        eip = regs->eip;

        if (eip < PAGE_OFFSET)
                goto no_bug;
        if (__get_user(ud2, (unsigned short __user *)eip))
                goto no_bug;
        if (ud2 != 0x0b0f)
                goto no_bug;
        if (__get_user(line, (unsigned short __user *)(eip + 2)))
                goto bug;
        if (__get_user(file, (char * __user *)(eip + 4)) ||
            (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
                file = "<bad filename>";

        printk(KERN_EMERG "------------[ cut here ]------------\n");
        printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);

no_bug:
        return;

/* Here we know it was a BUG but file-n-line is unavailable */
bug:
        printk(KERN_EMERG "Kernel BUG\n");
}

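/*
 * For reference: on this vintage of i386, BUG() emits the ud2 opcode
 * (0x0f 0x0b, read back above as the little-endian u16 0x0b0f) followed
 * inline by a 16-bit line number and a 32-bit file-name pointer, which
 * is exactly the record handle_BUG() decodes from the faulting
 * instruction stream.
 */
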
/*
 * This is entered when something in the kernel has done something bad
 * and is about to be terminated.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
        static struct {
                spinlock_t lock;
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock = SPIN_LOCK_UNLOCKED,
                .lock_owner = -1,
                .lock_owner_depth = 0
        };
        static int die_counter;
        unsigned long flags;

        oops_enter();

        if (die.lock_owner != raw_smp_processor_id()) {
                console_verbose();
                spin_lock_irqsave(&die.lock, flags);
                die.lock_owner = smp_processor_id();
                die.lock_owner_depth = 0;
                bust_spinlocks(1);
        }
        else
                local_save_flags(flags);

        if (++die.lock_owner_depth < 3) {
                int nl = 0;
                handle_BUG(regs);
                printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
                printk(KERN_EMERG "PREEMPT ");
                nl = 1;
#endif
#ifdef CONFIG_SMP
                if (!nl)
                        printk(KERN_EMERG);
                printk("SMP ");
                nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
                if (!nl)
                        printk(KERN_EMERG);
                printk("DEBUG_PAGEALLOC");
                nl = 1;
#endif
                if (nl)
                        printk("\n");
                if (notify_die(DIE_OOPS, str, regs, err,
                                current->thread.trap_no, SIGSEGV) !=
                    NOTIFY_STOP)
                        show_registers(regs);
                else
                        regs = NULL;
        } else
                printk(KERN_EMERG "Recursive die() failure, output suppressed\n");

        bust_spinlocks(0);
        die.lock_owner = -1;
        spin_unlock_irqrestore(&die.lock, flags);

        if (!regs)
                return;

        if (kexec_should_crash(current))
                crash_kexec(regs);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
                ssleep(5);
                panic("Fatal exception");
        }
        oops_exit();
        do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!user_mode_vm(regs))
                die(str, regs, err);
}

static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
                              struct pt_regs * regs, long error_code,
                              siginfo_t *info)
{
        struct task_struct *tsk = current;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

        if (regs->eflags & VM_MASK) {
                if (vm86)
                        goto vm86_trap;
                goto trap_signal;
        }

        if (!user_mode(regs))
                goto kernel_trap;

        trap_signal: {
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        kernel_trap: {
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }

        vm86_trap: {
                int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
                if (ret)
                        goto trap_signal;
                return;
        }
}

#define DO_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}

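/*
 * As a concrete example of the macros above,
 * DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) expands to:
 *
 *      fastcall void do_invalid_TSS(struct pt_regs * regs, long error_code)
 *      {
 *              if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code,
 *                                              10, SIGSEGV) == NOTIFY_STOP)
 *                      return;
 *              do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
 *      }
 */
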
DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)

fastcall void __kprobes do_general_protection(struct pt_regs * regs,
                                              long error_code)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &current->thread;

        /*
         * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
         * invalid offset set (the LAZY one) and the faulting thread has
         * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
         * and we set the offset field correctly. Then we let the CPU
         * restart the faulting instruction.
         */
        if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
            thread->io_bitmap_ptr) {
                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
                       thread->io_bitmap_max);
                /*
                 * If the previously set map was extending to higher ports
                 * than the current one, pad extra space with 0xff (no access).
                 */
                if (thread->io_bitmap_max < tss->io_bitmap_max)
                        memset((char *) tss->io_bitmap +
                                thread->io_bitmap_max, 0xff,
                                tss->io_bitmap_max - thread->io_bitmap_max);
                tss->io_bitmap_max = thread->io_bitmap_max;
                tss->io_bitmap_base = IO_BITMAP_OFFSET;
                tss->io_bitmap_owner = thread;
                put_cpu();
                return;
        }
        put_cpu();

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;

        if (regs->eflags & VM_MASK)
                goto gp_in_vm86;

        if (!user_mode(regs))
                goto gp_in_kernel;

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;
        force_sig(SIGSEGV, current);
        return;

gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;

gp_in_kernel:
        if (!fixup_exception(regs)) {
                if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}

static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
        printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
                "to continue\n");
        printk(KERN_EMERG "You probably have a hardware problem with your RAM "
                "chips\n");

        /* Clear and disable the memory parity error line. */
        clear_mem_error(reason);
}

596 | ||
597 | static void io_check_error(unsigned char reason, struct pt_regs * regs) | |
598 | { | |
599 | unsigned long i; | |
600 | ||
9c107805 | 601 | printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n"); |
1da177e4 LT |
602 | show_registers(regs); |
603 | ||
604 | /* Re-enable the IOCK line, wait for a few seconds */ | |
605 | reason = (reason & 0xf) | 8; | |
606 | outb(reason, 0x61); | |
607 | i = 2000; | |
608 | while (--i) udelay(1000); | |
609 | reason &= ~8; | |
610 | outb(reason, 0x61); | |
611 | } | |
612 | ||
613 | static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs) | |
614 | { | |
615 | #ifdef CONFIG_MCA | |
616 | /* Might actually be able to figure out what the guilty party | |
617 | * is. */ | |
618 | if( MCA_bus ) { | |
619 | mca_handle_nmi(); | |
620 | return; | |
621 | } | |
622 | #endif | |
623 | printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", | |
624 | reason, smp_processor_id()); | |
625 | printk("Dazed and confused, but trying to continue\n"); | |
626 | printk("Do you have a strange power saving mode enabled?\n"); | |
627 | } | |
628 | ||
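/*
 * For reference, "reason" in the helpers above is the ISA NMI status
 * port 0x61: bit 7 reports a memory parity error, bit 6 an I/O channel
 * check (IOCHK), and writing bits 2-3 clears/disables those sources --
 * hence the reason & 0xc0 test in default_do_nmi() below and the bit 3
 * toggling in io_check_error().
 */
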
static DEFINE_SPINLOCK(nmi_print_lock);

void die_nmi(struct pt_regs *regs, const char *msg)
{
        if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
            NOTIFY_STOP)
                return;

        spin_lock(&nmi_print_lock);
        /*
         * We are in trouble anyway, let's at least try
         * to get a message out.
         */
        bust_spinlocks(1);
        printk(KERN_EMERG "%s", msg);
        printk(" on CPU%d, eip %08lx, registers:\n",
                smp_processor_id(), regs->eip);
        show_registers(regs);
        printk(KERN_EMERG "console shuts up ...\n");
        console_silent();
        spin_unlock(&nmi_print_lock);
        bust_spinlocks(0);

        /*
         * If we are in kernel we are probably nested up pretty bad
         * and might as well get out now while we still can.
         */
        if (!user_mode_vm(regs)) {
                current->thread.trap_no = 2;
                crash_kexec(regs);
        }

        do_exit(SIGSEGV);
}

static void default_do_nmi(struct pt_regs * regs)
{
        unsigned char reason = 0;

        /* Only the BSP gets external NMIs from the system. */
        if (!smp_processor_id())
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                    == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog) {
                        nmi_watchdog_tick(regs);
                        return;
                }
#endif
                unknown_nmi_error(reason, regs);
                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered.
         */
        reassert_nmi();
}

static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
        return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

fastcall void do_nmi(struct pt_regs * regs, long error_code)
{
        int cpu;

        nmi_enter();

        cpu = smp_processor_id();

        ++nmi_count(cpu);

        if (!rcu_dereference(nmi_callback)(regs, cpu))
                default_do_nmi(regs);

        nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
        vmalloc_sync_all();
        rcu_assign_pointer(nmi_callback, callback);
}
EXPORT_SYMBOL_GPL(set_nmi_callback);

void unset_nmi_callback(void)
{
        nmi_callback = dummy_nmi_callback;
}
EXPORT_SYMBOL_GPL(unset_nmi_callback);

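/*
 * Note the publish/subscribe pairing above: set_nmi_callback() installs
 * the new handler with rcu_assign_pointer(), whose write barrier makes
 * the callback's initialization visible before the pointer itself, and
 * do_nmi() reads it through rcu_dereference(), so an NMI can never call
 * into a half-constructed handler.
 */
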
#ifdef CONFIG_KPROBES
fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
            == NOTIFY_STOP)
                return;
        /*
         * This is an interrupt gate, because kprobes wants interrupts
         * disabled. Normal trap handlers don't.
         */
        restore_interrupts(regs);
        do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
{
        unsigned int condition;
        struct task_struct *tsk = current;

        get_debugreg(condition, 6);

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                        SIGTRAP) == NOTIFY_STOP)
                return;
        /* It's safe to allow irq's after DR6 has been saved */
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg[7])
                        goto clear_dr7;
        }

        if (regs->eflags & VM_MASK)
                goto debug_vm86;

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg[6] = condition;

        /*
         * Single-stepping through TF: make sure we ignore any events in
         * kernel space (but re-enable TF when returning to user mode).
         */
        if (condition & DR_STEP) {
                /*
                 * We already checked v86 mode above, so we can
                 * check for kernel mode by just checking the CPL
                 * of CS.
                 */
                if (!user_mode(regs))
                        goto clear_TF_reenable;
        }

        /* Ok, finally something we can handle */
        send_sigtrap(tsk, regs, error_code);

        /*
         * Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        set_debugreg(0, 7);
        return;

debug_vm86:
        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->eflags &= ~TF_MASK;
        return;
}

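/*
 * For reference, "condition" above is the raw DR6 debug status
 * register: DR_TRAP0..DR_TRAP3 (bits 0-3) identify which hardware
 * breakpoint fired, and DR_STEP (bit 14) flags a TF single-step trap,
 * which is how the handler tells watchpoint hits and single-stepping
 * apart.
 */
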
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short cwd, swd;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status. 0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit. We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception.
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (swd & ~cwd & 0x3f) {
        case 0x000: /* No unmasked exception */
                return;
        default:    /* Multiple exceptions */
                break;
        case 0x001: /* Invalid Op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}

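/*
 * Worked example for the decode above: with the x87 power-on control
 * word 0x037f every exception is masked, so swd & ~cwd & 0x3f is 0 and
 * math_error() just returns. A task that unmasks zero-divide (clearing
 * cwd bit 2) and then divides by zero gets ZE set in the status word,
 * swd & ~cwd & 0x3f == 0x004, and receives SIGFPE with FPE_FLTDIV.
 */
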
fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
        ignore_fpu_irq = 1;
        math_error((void __user *)regs->eip);
}

static void simd_math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register. Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}

fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
                                        long error_code)
{
        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_fpu_irq = 1;
                simd_math_error((void __user *)regs->eip);
        } else {
                /*
                 * Handle strange cache flush from user space exception
                 * in all other cases. This is undocumented behaviour.
                 */
                if (regs->eflags & VM_MASK) {
                        handle_vm86_fault((struct kernel_vm86_regs *)regs,
                                          error_code);
                        return;
                }
                current->thread.trap_no = 19;
                current->thread.error_code = error_code;
                die_if_kernel("cache flush denied", regs, error_code);
                force_sig(SIGSEGV, current);
        }
}

fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
                                        long error_code)
{
#if 0
        /* No need to warn about this any longer. */
        printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

fastcall void setup_x86_bogus_stack(unsigned char * stk)
{
        unsigned long *switch16_ptr, *switch32_ptr;
        struct pt_regs *regs;
        unsigned long stack_top, stack_bot;
        unsigned short iret_frame16_off;
        int cpu = smp_processor_id();
        /* reserve the space on 32bit stack for the magic switch16 pointer */
        memmove(stk, stk + 8, sizeof(struct pt_regs));
        switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
        regs = (struct pt_regs *)stk;
        /* now the switch32 on 16bit stack */
        stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
        stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
        switch32_ptr = (unsigned long *)(stack_top - 8);
        iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
        /* copy iret frame on 16bit stack */
        memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
        /* fill in the switch pointers */
        switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
        switch16_ptr[1] = __ESPFIX_SS;
        switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
                8 - CPU_16BIT_STACK_SIZE;
        switch32_ptr[1] = __KERNEL_DS;
}

fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
{
        unsigned long *switch32_ptr;
        unsigned char *stack16, *stack32;
        unsigned long stack_top, stack_bot;
        int len;
        int cpu = smp_processor_id();
        stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
        stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
        switch32_ptr = (unsigned long *)(stack_top - 8);
        /* copy the data from 16bit stack to 32bit stack */
        len = CPU_16BIT_STACK_SIZE - 8 - sp;
        stack16 = (unsigned char *)(stack_bot + sp);
        stack32 = (unsigned char *)
                (switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
        memcpy(stack32, stack16, len);
        return stack32;
}

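/*
 * Background on the two helpers above (the "espfix" workaround): when
 * the kernel irets to code using a 16-bit stack segment, the CPU
 * restores only the low 16 bits of %esp, which would both leak kernel
 * stack bits and corrupt the user's stack layout. The iret frame is
 * therefore staged through a per-CPU 16-bit stack addressed via
 * __ESPFIX_SS, and these helpers copy state between the real 32-bit
 * kernel stack and that staging area.
 */
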
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        clts();         /* Allow maths ops (or we recurse) */
        if (!tsk_used_math(tsk))
                init_fpu(tsk);
        restore_fpu(tsk);
        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
}

#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
        printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
        printk(KERN_EMERG "killing %s.\n", current->comm);
        force_sig(SIGFPE, current);
        schedule();
}

#endif /* CONFIG_MATH_EMULATION */

#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
        __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

        /*
         * Update the IDT descriptor and reload the IDT so that
         * it uses the read-only mapped virtual address.
         */
        idt_descr.address = fix_to_virt(FIX_F00F_IDT);
        load_idt(&idt_descr);
}
#endif

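/*
 * For reference: the Pentium "F0 0F" erratum lets an unprivileged
 * lock cmpxchg8b %eax (bytes f0 0f c7 c8) wedge the CPU while it
 * delivers the invalid-opcode exception. With the IDT mapped read-only
 * as above, the sequence instead takes a page fault at the IDT entry
 * for vector 6, which do_page_fault() recognizes and redirects to
 * do_invalid_op().
 */
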
#define _set_gate(gate_addr,type,dpl,addr,seg) \
do { \
        int __d0, __d1; \
        __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
                "movw %4,%%dx\n\t" \
                "movl %%eax,%0\n\t" \
                "movl %%edx,%1" \
                :"=m" (*((long *) (gate_addr))), \
                 "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
                :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
                 "3" ((char *) (addr)),"2" ((seg) << 16)); \
} while (0)

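/*
 * For reference, each IDT entry built by _set_gate() is two 32-bit
 * words. The low word is (selector << 16) | (handler offset & 0xffff);
 * the high word is (offset & 0xffff0000) plus the access bits built
 * above as 0x8000 + (dpl << 13) + (type << 8), i.e. P=1 in bit 15, the
 * DPL in bits 13-14, and the gate type in bits 8-11 (14 = 32-bit
 * interrupt gate, which clears IF on entry; 15 = 32-bit trap gate,
 * which leaves IF alone; 5 = task gate).
 */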

/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
void set_intr_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,14,0,addr,__KERNEL_CS);
}

/*
 * This routine sets up an interrupt gate at descriptor privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n, 14, 3, addr, __KERNEL_CS);
}

static void __init set_trap_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,15,0,addr,__KERNEL_CS);
}

static void __init set_system_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,15,3,addr,__KERNEL_CS);
}

static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
{
        _set_gate(idt_table+n,5,0,0,(gdt_entry<<3));
}

void __init trap_init(void)
{
#ifdef CONFIG_EISA
        void __iomem *p = ioremap(0x0FFFD9, 4);
        if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
                EISA_bus = 1;
        }
        iounmap(p);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        init_apic_mappings();
#endif

        set_trap_gate(0,&divide_error);
        set_intr_gate(1,&debug);
        set_intr_gate(2,&nmi);
        set_system_intr_gate(3, &int3); /* int3/4 can be called from all */
        set_system_gate(4,&overflow);
        set_trap_gate(5,&bounds);
        set_trap_gate(6,&invalid_op);
        set_trap_gate(7,&device_not_available);
        set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
        set_trap_gate(9,&coprocessor_segment_overrun);
        set_trap_gate(10,&invalid_TSS);
        set_trap_gate(11,&segment_not_present);
        set_trap_gate(12,&stack_segment);
        set_trap_gate(13,&general_protection);
        set_intr_gate(14,&page_fault);
        set_trap_gate(15,&spurious_interrupt_bug);
        set_trap_gate(16,&coprocessor_error);
        set_trap_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
        set_trap_gate(18,&machine_check);
#endif
        set_trap_gate(19,&simd_coprocessor_error);

        if (cpu_has_fxsr) {
                /*
                 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
                 * Generates a compile-time "error: zero width for bit-field" if
                 * the alignment is wrong.
                 */
                struct fxsrAlignAssert {
                        int _:!(offsetof(struct task_struct,
                                        thread.i387.fxsave) & 15);
                };

                printk(KERN_INFO "Enabling fast FPU save and restore... ");
                set_in_cr4(X86_CR4_OSFXSR);
                printk("done.\n");
        }
        if (cpu_has_xmm) {
                printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
                        "support... ");
                set_in_cr4(X86_CR4_OSXMMEXCPT);
                printk("done.\n");
        }

        set_system_gate(SYSCALL_VECTOR,&system_call);

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();

        trap_init_hook();
}

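/*
 * The fxsrAlignAssert struct in trap_init() is a poor man's
 * compile-time assertion: the bit-field width expression evaluates to 1
 * when the fxsave area offset is 16-byte aligned and to 0 otherwise,
 * and a named zero-width bit-field is a hard compiler error, so a
 * misaligned layout breaks the build instead of corrupting FXSAVE state
 * at run time.
 */
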
static int __init kstack_setup(char *s)
{
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 0;
}
__setup("kstack=", kstack_setup);