/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/mce.h>

#include <asm/mach_traps.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.
 */
gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
#endif

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static int ignore_nmis;

int unknown_nmi_panic;

/*
 * Serializes access to the NMI reason port (0x61); may only be
 * taken from the NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

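/*
 * Enable/disable interrupts in a trap handler only if the interrupted
 * context had them enabled (IF set in the saved flags). The preempt_
 * variants also hold off preemption for the duration of the handler.
 */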
static inline void conditional_sti(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
        inc_preempt_count();
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
        dec_preempt_count();
}

static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
        long error_code, siginfo_t *info)
{
        struct task_struct *tsk = current;

#ifdef CONFIG_X86_32
        if (regs->flags & X86_VM_MASK) {
                /*
                 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
                 * On nmi (interrupt 2), do_trap should not be called.
                 */
                if (trapnr < 6)
                        goto vm86_trap;
                goto trap_signal;
        }
#endif

        if (!user_mode(regs))
                goto kernel_trap;

#ifdef CONFIG_X86_32
trap_signal:
#endif
        /*
         * We want error_code and trap_no set for userspace faults and
         * kernelspace faults which result in die(), but not
         * kernelspace faults which are fixed up. die() gives the
         * process no chance to handle the signal and notice the
         * kernel fault information, so that won't result in polluting
         * the information about previously queued, but not yet
         * delivered, faults. See also do_general_protection below.
         */
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

#ifdef CONFIG_X86_64
        if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
            printk_ratelimit()) {
                printk(KERN_INFO
                       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
                       tsk->comm, tsk->pid, str,
                       regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                printk("\n");
        }
#endif

        if (info)
                force_sig_info(signr, info, tsk);
        else
                force_sig(signr, tsk);
        return;

kernel_trap:
        if (!fixup_exception(regs)) {
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                die(str, regs, error_code);
        }
        return;

#ifdef CONFIG_X86_32
vm86_trap:
        if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
                             error_code, trapnr))
                goto trap_signal;
        return;
#endif
}

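/*
 * Generate handlers for the simple exceptions: notify the die chain,
 * re-enable interrupts if the interrupted context had them enabled,
 * then deliver the trap via do_trap().
 */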
#define DO_ERROR(trapnr, signr, str, name)                              \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)     \
{                                                                       \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, regs, error_code, NULL);            \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)         \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)     \
{                                                                       \
        siginfo_t info;                                                 \
        info.si_signo = signr;                                          \
        info.si_errno = 0;                                              \
        info.si_code = sicode;                                          \
        info.si_addr = (void __user *)siaddr;                           \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, regs, error_code, &info);           \
}

DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR(4, SIGSEGV, "overflow", overflow)
DO_ERROR(5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
#endif
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
                       12, SIGBUS) == NOTIFY_STOP)
                return;
        preempt_conditional_sti(regs);
        do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}

dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

        /* Return value not checked: a double fault cannot be ignored */
        notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 8;

        /*
         * This is always a kernel trap and never fixable (and thus must
         * never return).
         */
        for (;;)
                die(str, regs, error_code);
}
#endif

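/*
 * #GP (vector 13): fix up kernel-mode faults that have an exception
 * table entry, otherwise send SIGSEGV to the offending task.
 */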
dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk;

        conditional_sti(regs);

#ifdef CONFIG_X86_32
        if (regs->flags & X86_VM_MASK)
                goto gp_in_vm86;
#endif

        tsk = current;
        if (!user_mode(regs))
                goto gp_in_kernel;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 13;

        if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
            printk_ratelimit()) {
                printk(KERN_INFO
                       "%s[%d] general protection ip:%lx sp:%lx error:%lx",
                       tsk->comm, task_pid_nr(tsk),
                       regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                printk("\n");
        }

        force_sig(SIGSEGV, tsk);
        return;

#ifdef CONFIG_X86_32
gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;
#endif

gp_in_kernel:
        if (fixup_exception(regs))
                return;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 13;
        if (notify_die(DIE_GPF, "general protection fault", regs,
                       error_code, 13, SIGSEGV) == NOTIFY_STOP)
                return;
        die("general protection fault", regs, error_code);
}

static int __init setup_unknown_nmi_panic(char *str)
{
        unknown_nmi_panic = 1;
        return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);

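/*
 * Decode NMIs reported through the reason port: PCI SERR and IOCHK
 * errors, plus NMIs that match no known source.
 */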
static notrace __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
        pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
                 reason, smp_processor_id());

        /*
         * On some machines, PCI SERR line is used to report memory
         * errors. EDAC makes use of it.
         */
#if defined(CONFIG_EDAC)
        if (edac_handler_set()) {
                edac_atomic_assert_error();
                return;
        }
#endif

        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        pr_emerg("Dazed and confused, but trying to continue\n");

        /* Clear and disable the PCI SERR error line. */
        reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
        outb(reason, NMI_REASON_PORT);
}

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
        unsigned long i;

        pr_emerg(
        "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
                 reason, smp_processor_id());
        show_registers(regs);

        if (panic_on_io_nmi)
                panic("NMI IOCK error: Not continuing");

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
        outb(reason, NMI_REASON_PORT);

        i = 20000;
        while (--i) {
                touch_nmi_watchdog();
                udelay(100);
        }

        reason &= ~NMI_REASON_CLEAR_IOCHK;
        outb(reason, NMI_REASON_PORT);
}

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
        if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
                        NOTIFY_STOP)
                return;
#ifdef CONFIG_MCA
        /*
         * Might actually be able to figure out what the guilty party
         * is:
         */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                 reason, smp_processor_id());

        pr_emerg("Do you have a strange power saving mode enabled?\n");
        if (unknown_nmi_panic || panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        pr_emerg("Dazed and confused, but trying to continue\n");
}

static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;

        /*
         * CPU-specific NMI must be processed before non-CPU-specific
         * NMI, otherwise we may lose it, because the CPU-specific
         * NMI can not be detected/processed on other CPUs.
         */
        if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
                return;

        /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
        raw_spin_lock(&nmi_reason_lock);
        reason = get_nmi_reason();

        if (reason & NMI_REASON_MASK) {
                if (reason & NMI_REASON_SERR)
                        pci_serr_error(reason, regs);
                else if (reason & NMI_REASON_IOCHK)
                        io_check_error(reason, regs);
#ifdef CONFIG_X86_32
                /*
                 * Reassert NMI in case it became active
                 * meanwhile as it's edge-triggered:
                 */
                reassert_nmi();
#endif
                raw_spin_unlock(&nmi_reason_lock);
                return;
        }
        raw_spin_unlock(&nmi_reason_lock);

        unknown_nmi_error(reason, regs);
}

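/*
 * Entry point for all NMIs. The NMI is accounted unconditionally;
 * actual handling is skipped while NMIs are being ignored.
 */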
dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
        nmi_enter();

        inc_irq_stat(__nmi_count);

        if (!ignore_nmis)
                default_do_nmi(regs);

        nmi_exit();
}

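/*
 * stop_nmi()/restart_nmi() let callers temporarily suppress default
 * NMI processing; the calls nest via a simple counter.
 */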
void stop_nmi(void)
{
        ignore_nmis++;
}

void restart_nmi(void)
{
        ignore_nmis--;
}

/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
        if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
#ifdef CONFIG_KPROBES
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
#else
        if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
#endif

        preempt_conditional_sti(regs);
        do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = eregs;
        /* Did already sync */
        if (eregs == (struct pt_regs *)eregs->sp)
                ;
        /* Exception from user space */
        else if (user_mode(eregs))
                regs = task_pt_regs(current);
        /*
         * Exception from kernel and interrupts are enabled. Move to
         * kernel process stack.
         */
        else if (eregs->flags & X86_EFLAGS_IF)
                regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
        if (eregs != regs)
                *regs = *eregs;
        return regs;
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk = current;
        int user_icebp = 0;
        unsigned long dr6;
        int si_code;

        get_debugreg(dr6, 6);

        /* Filter out all the reserved bits which are preset to 1 */
        dr6 &= ~DR6_RESERVED;

        /*
         * If dr6 has no reason to give us about the origin of this trap,
         * then it's very likely the result of an icebp/int01 trap.
         * User wants a sigtrap for that.
         */
        if (!dr6 && user_mode(regs))
                user_icebp = 1;

        /* Catch kmemcheck conditions first of all! */
        if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
                return;

        /* DR6 may or may not be cleared by the CPU */
        set_debugreg(0, 6);

        /*
         * The processor cleared BTF, so don't mark that we need it set.
         */
        clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

        /* Store the virtualized DR6 value */
        tsk->thread.debugreg6 = dr6;

        if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
                                                SIGTRAP) == NOTIFY_STOP)
                return;

        /* It's safe to allow irq's after DR6 has been saved */
        preempt_conditional_sti(regs);

        if (regs->flags & X86_VM_MASK) {
                handle_vm86_trap((struct kernel_vm86_regs *) regs,
                                 error_code, 1);
                preempt_conditional_cli(regs);
                return;
        }

        /*
         * Single-stepping through system calls: ignore any exceptions in
         * kernel space, but re-enable TF when returning to user mode.
         *
         * We already checked v86 mode above, so we can check for kernel mode
         * by just checking the CPL of CS.
         */
        if ((dr6 & DR_STEP) && !user_mode(regs)) {
                tsk->thread.debugreg6 &= ~DR_STEP;
                set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
                regs->flags &= ~X86_EFLAGS_TF;
        }
        si_code = get_si_code(tsk->thread.debugreg6);
        if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
                send_sigtrap(tsk, regs, error_code, si_code);
        preempt_conditional_cli(regs);

        return;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
        struct task_struct *task = current;
        siginfo_t info;
        unsigned short err;
        char *str = (trapnr == 16) ? "fpu exception" : "simd exception";

        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
                return;
        conditional_sti(regs);

        if (!user_mode_vm(regs)) {
                if (!fixup_exception(regs)) {
                        task->thread.error_code = error_code;
                        task->thread.trap_no = trapnr;
                        die(str, regs, error_code);
                }
                return;
        }

        /*
         * Save the info for the exception handler and clear the error.
         */
        save_init_fpu(task);
        task->thread.trap_no = trapnr;
        task->thread.error_code = error_code;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = (void __user *)regs->ip;
        if (trapnr == 16) {
                unsigned short cwd, swd;
                /*
                 * (~cwd & swd) will mask out exceptions that are not set to unmasked
                 * status. 0x3f is the exception bits in these regs, 0x200 is the
                 * C1 reg you need in case of a stack fault, 0x040 is the stack
                 * fault bit. We should only be taking one exception at a time,
                 * so if this combination doesn't produce any single exception,
                 * then we have a bad program that isn't synchronizing its FPU usage
                 * and it will suffer the consequences since we won't be able to
                 * fully reproduce the context of the exception.
                 */
                cwd = get_fpu_cwd(task);
                swd = get_fpu_swd(task);

                err = swd & ~cwd;
        } else {
                /*
                 * The SIMD FPU exceptions are handled a little differently, as there
                 * is only a single status/control register. Thus, to determine which
                 * unmasked exception was caught we must mask the exception mask bits
                 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
                 */
                unsigned short mxcsr = get_fpu_mxcsr(task);
                err = ~(mxcsr >> 7) & mxcsr;
        }

        if (err & 0x001) {      /* Invalid op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
        } else if (err & 0x004) { /* Divide by Zero */
                info.si_code = FPE_FLTDIV;
        } else if (err & 0x008) { /* Overflow */
                info.si_code = FPE_FLTOVF;
        } else if (err & 0x012) { /* Denormal, Underflow */
                info.si_code = FPE_FLTUND;
        } else if (err & 0x020) { /* Precision */
                info.si_code = FPE_FLTRES;
        } else {
                /*
                 * If we're using IRQ 13, or supposedly even some trap 16
                 * implementations, it's possible we get a spurious trap...
                 */
                return;         /* Spurious trap, no error */
        }
        force_sig_info(SIGFPE, &info, task);
}

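/*
 * #MF (vector 16) and #XM (vector 19) both funnel into math_error()
 * with the matching trap number.
 */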
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
        ignore_fpu_irq = 1;
#endif

        math_error(regs, error_code, 16);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
        math_error(regs, error_code, 19);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
        conditional_sti(regs);
#if 0
        /* No need to warn about this any longer. */
        printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

/*
 * __math_state_restore assumes that cr0.TS is already clear and the
 * fpu state is all ready for use. Used during context switch.
 */
void __math_state_restore(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        /*
         * Paranoid restore: send a SIGSEGV if we fail to restore the state.
         */
        if (unlikely(restore_fpu_checking(tsk))) {
                stts();
                force_sig(SIGSEGV, tsk);
                return;
        }

        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
        tsk->fpu_counter++;
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        if (!tsk_used_math(tsk)) {
                local_irq_enable();
                /*
                 * init_fpu() does a slab allocation, which can sleep.
                 */
                if (init_fpu(tsk)) {
                        /*
                         * Ran out of memory: kill the task.
                         */
                        do_group_exit(SIGKILL);
                        return;
                }
                local_irq_disable();
        }

        clts();                         /* Allow maths ops (or we recurse) */

        __math_state_restore();
}
EXPORT_SYMBOL_GPL(math_state_restore);

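/*
 * #NM (vector 7): either hand the faulting instruction to the FPU
 * emulator (CR0.EM set) or lazily restore this task's FPU state.
 */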
dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_MATH_EMULATION
        if (read_cr0() & X86_CR0_EM) {
                struct math_emu_info info = { };

                conditional_sti(regs);

                info.regs = regs;
                math_emulate(&info);
                return;
        }
#endif
        math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
        conditional_sti(regs);
#endif
}

#ifdef CONFIG_X86_32
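/*
 * Handle the iret exception: raised when an iret back to user space
 * faults; deliver SIGILL for the bad stack state.
 */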
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
        siginfo_t info;

        local_irq_enable();

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_BADSTK;
        info.si_addr = NULL;
        if (notify_die(DIE_TRAP, "iret exception",
                       regs, error_code, 32, SIGILL) == NOTIFY_STOP)
                return;
        do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
        set_intr_gate_ist(1, &debug, DEBUG_STACK);
        /* int3 can be called from all */
        set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
        set_intr_gate(14, &page_fault);
        load_idt(&idt_descr);
}

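/*
 * Install IDT entries for all architectural traps and reserve the
 * exception and system-call vectors in used_vectors.
 */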
void __init trap_init(void)
{
        int i;

#ifdef CONFIG_EISA
        void __iomem *p = early_ioremap(0x0FFFD9, 4);

        if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
                EISA_bus = 1;
        early_iounmap(p, 4);
#endif

        set_intr_gate(0, &divide_error);
        set_intr_gate_ist(2, &nmi, NMI_STACK);
        /* int4 can be called from all */
        set_system_intr_gate(4, &overflow);
        set_intr_gate(5, &bounds);
        set_intr_gate(6, &invalid_op);
        set_intr_gate(7, &device_not_available);
#ifdef CONFIG_X86_32
        set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
        set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
#endif
        set_intr_gate(9, &coprocessor_segment_overrun);
        set_intr_gate(10, &invalid_TSS);
        set_intr_gate(11, &segment_not_present);
        set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
        set_intr_gate(13, &general_protection);
        set_intr_gate(15, &spurious_interrupt_bug);
        set_intr_gate(16, &coprocessor_error);
        set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
        set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
        set_intr_gate(19, &simd_coprocessor_error);

        /* Reserve all the builtin and the syscall vector: */
        for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
                set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
        set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
        set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
        set_system_trap_gate(SYSCALL_VECTOR, &system_call);
        set_bit(SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_64
        BUG_ON(test_bit(VSYSCALL_EMU_VECTOR, used_vectors));
        set_system_intr_gate(VSYSCALL_EMU_VECTOR, &emulate_vsyscall);
        set_bit(VSYSCALL_EMU_VECTOR, used_vectors);
#endif

        /*
         * Should be a barrier for any external CPU state:
         */
        cpu_init();

        x86_init.irqs.trap_init();
}