/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/tracehook.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include "entry.h"

pgm_check_handler_t *pgm_check_table[128];

int show_unhandled_signals;

extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_asce_exception;

#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
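/*
 * Illustrative sketch (an assumption about intended use, not taken from
 * this file): "la %0,0(15)" loads the address 0(%r15), i.e. the current
 * value of general register 15, which the s390 ABI uses as the stack
 * pointer, so the macro evaluates to the current kernel stack pointer:
 *
 *	void **sp = stack_pointer;
 *	printk("current kernel sp: %p\n", sp);
 */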

#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflown
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
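/*
 * Sketch of the walk done by __show_trace() below, assuming the standard
 * s390 stack frame layout: every frame starts with a back chain slot
 * pointing to the caller's frame, and gprs[8] of the register save area
 * holds the saved return address (%r14).  Conceptually:
 *
 *	sf = (struct stack_frame *) sp;
 *	while (sf->back_chain)
 *		sf = (struct stack_frame *) sf->back_chain;
 *
 * A zero back chain marks the bottom of the current stack; a pt_regs
 * frame may follow, from which the walk continues on the stack of the
 * interrupted context.
 */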
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}
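/*
 * The value returned above is the stack pointer at which the walk left
 * the [low, high) window, so a caller can continue on the next stack.
 * show_trace() below chains the three stacks exactly this way (the
 * bounds here are placeholders):
 *
 *	sp = __show_trace(sp, panic_low, panic_high);
 *	sp = __show_trace(sp, async_low, async_high);
 *	__show_trace(sp, sync_low, sync_high);
 */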

static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long *__r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i * sizeof (long) % 32) == 0))
			printk("\n ");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}
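/*
 * Background note: the breaking-event address records the address of the
 * last instruction that broke sequential execution, typically the last
 * taken branch before the current PSW address.  The low-level entry code
 * is assumed to have copied it into regs->args[0], which is what gets
 * printed above; it is often the best hint for how execution reached the
 * faulting instruction.
 */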

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);

static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}
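/*
 * Worked example (values purely illustrative): for bits = 0x0000f000,
 * ((~bits + 1) & bits) isolates the lowest set bit, 0x00001000.  If
 * regs->psw.mask & bits is 0x00006000, the division yields 6, i.e. the
 * selected PSW field shifted down to bit 0.  show_registers() below uses
 * this to decode the individual PSW fields (R, T, IO, EX, Key, ...).
 */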

void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk(" " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk(" " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk(" " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

	show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
	print_modules();
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!(regs->psw.mask & PSW_MASK_PSTATE))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;

	oops_enter();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}

static inline void report_user_fault(struct pt_regs *regs, long int_code,
				     int signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk("User process fault: interruption code 0x%lX ", int_code);
	print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
	printk("\n");
	show_regs(regs);
}

int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}

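/*
 * Common slow path for program checks: after giving the die notifier
 * chain a chance to intercept, do_trap() either delivers the prepared
 * siginfo to the faulting user task (recording the trap number in
 * thread.trap_no), or, for faults in kernel mode, applies an
 * exception-table fixup if one exists, lets report_bug() handle
 * WARN()-style traps, and calls die() as the last resort.
 */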
static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str,
				     struct pt_regs *regs, siginfo_t *info)
{
	if (notify_die(DIE_TRAP, str, regs, pgm_int_code,
		       pgm_int_code, signr) == NOTIFY_STOP)
		return;

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		struct task_struct *tsk = current;

		tsk->thread.trap_no = pgm_int_code & 0xffff;
		force_sig_info(signr, info, tsk);
		report_user_fault(regs, pgm_int_code, signr);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(str, regs, pgm_int_code);
		}
	}
}

static inline void __user *get_psw_address(struct pt_regs *regs,
					   long pgm_int_code)
{
	return (void __user *)
		((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
}
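/*
 * Note on the arithmetic above: after a program check the PSW address
 * points past the instruction that caused it, and the upper halfword of
 * pgm_int_code is assumed to carry the instruction length in bytes (as
 * stored by the low-level program check handler).  Subtracting the two
 * yields the address of the faulting instruction itself; e.g. for a
 * 4-byte instruction at 0x1000 the PSW holds 0x1004 and
 * (pgm_int_code >> 16) is 4.
 */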

void __kprobes do_per_trap(struct pt_regs *regs)
{
	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
		return;
	if (current->ptrace)
		force_sig(SIGTRAP, current);
}

static void default_trap_handler(struct pt_regs *regs, long pgm_int_code,
				 unsigned long trans_exc_code)
{
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		report_user_fault(regs, pgm_int_code, SIGSEGV);
		do_exit(SIGSEGV);
	} else
		die("Unknown program exception", regs, pgm_int_code);
}

#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs, long pgm_int_code, \
		 unsigned long trans_exc_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = get_psw_address(regs, pgm_int_code); \
	do_trap(pgm_int_code, signr, str, regs, &info); \
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
	      "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
	      "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
	      "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
	      "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
	      "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
	      "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
	      "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
	      "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
	      "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
	      "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
	      "translation exception")

static inline void do_fp_trap(struct pt_regs *regs, void __user *location,
			      int fpc, long pgm_int_code)
{
	siginfo_t si;

	si.si_signo = SIGFPE;
	si.si_errno = 0;
	si.si_addr = location;
	si.si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si.si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si.si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si.si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si.si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si.si_code = FPE_FLTRES;
	}
	do_trap(pgm_int_code, SIGFPE,
		"floating point exception", regs, &si);
}
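/*
 * Worked example (illustrative): the DXC is byte 2 of the FPC, i.e.
 * bits 8-15 of the value tested above.  An IEEE divide-by-zero sets
 * DXC 0x40, so fpc contains 0x4000 in that byte: the (fpc & 0x0300)
 * test sees zero, the 0x4000 test matches, and si_code becomes
 * FPE_FLTDIV.  Non-IEEE DXC values leave si_code at 0.
 */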

static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
				 unsigned long trans_exc_code)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs, pgm_int_code);

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (current->ptrace)
				force_sig(SIGTRAP, current);
			else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, pgm_int_code,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, pgm_int_code);
	else if (signal == SIGSEGV) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *) location;
		do_trap(pgm_int_code, signal,
			"user address fault", regs, &info);
	} else
#endif
	if (signal) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPC;
		info.si_addr = (void __user *) location;
		do_trap(pgm_int_code, signal,
			"illegal operation", regs, &info);
	}
}


#ifdef CONFIG_MATHEMU
asmlinkage void specification_exception(struct pt_regs *regs,
					long pgm_int_code,
					unsigned long trans_exc_code)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_psw_address(regs, pgm_int_code);

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, pgm_int_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(pgm_int_code, signal,
			"specification exception", regs, &info);
	}
}
#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
	      "specification exception");
#endif

static void data_exception(struct pt_regs *regs, long pgm_int_code,
			   unsigned long trans_exc_code)
{
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs, pgm_int_code);

	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
	else if (regs->psw.mask & PSW_MASK_PSTATE) {
		__u8 opcode[6];
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, pgm_int_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(pgm_int_code, signal, "data exception", regs, &info);
	}
}

static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
				   unsigned long trans_exc_code)
{
	siginfo_t info;

	/* Set user psw back to home space mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = get_psw_address(regs, pgm_int_code);
	do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
}

asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs *regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */

void __init trap_init(void)
{
	int i;

	for (i = 0; i < 128; i++)
		pgm_check_table[i] = &default_trap_handler;
	pgm_check_table[1] = &illegal_op;
	pgm_check_table[2] = &privileged_op;
	pgm_check_table[3] = &execute_exception;
	pgm_check_table[4] = &do_protection_exception;
	pgm_check_table[5] = &addressing_exception;
	pgm_check_table[6] = &specification_exception;
	pgm_check_table[7] = &data_exception;
	pgm_check_table[8] = &overflow_exception;
	pgm_check_table[9] = &divide_exception;
	pgm_check_table[0x0A] = &overflow_exception;
	pgm_check_table[0x0B] = &divide_exception;
	pgm_check_table[0x0C] = &hfp_overflow_exception;
	pgm_check_table[0x0D] = &hfp_underflow_exception;
	pgm_check_table[0x0E] = &hfp_significance_exception;
	pgm_check_table[0x0F] = &hfp_divide_exception;
	pgm_check_table[0x10] = &do_dat_exception;
	pgm_check_table[0x11] = &do_dat_exception;
	pgm_check_table[0x12] = &translation_exception;
	pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
	pgm_check_table[0x38] = &do_asce_exception;
	pgm_check_table[0x39] = &do_dat_exception;
	pgm_check_table[0x3A] = &do_dat_exception;
	pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
	pgm_check_table[0x15] = &operand_exception;
	pgm_check_table[0x1C] = &space_switch_exception;
	pgm_check_table[0x1D] = &hfp_sqrt_exception;
	/* Enable machine checks early. */
	local_mcck_enable();
}
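/*
 * Dispatch sketch (an assumption based on the low-level entry code, not
 * taken from this file): on a program check the first-level handler
 * reads the program interruption code from the lowcore, masks it to
 * 7 bits and indexes pgm_check_table with it, roughly:
 *
 *	pgm_check_table[pgm_int_code & 0x7f](regs, pgm_int_code,
 *					     trans_exc_code);
 *
 * which is why every slot is pre-filled with default_trap_handler
 * before the specific handlers are installed above.
 */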