/*
 * Source: arch/parisc/kernel/traps.c
 * (mirror_ubuntu-artful-kernel.git, tag Ubuntu-4.13.0-45.50)
 */
1 /*
2 * linux/arch/parisc/traps.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
6 */
7
8 /*
9 * 'Traps.c' handles hardware traps and faults after we have saved some
10 * state in 'asm.s'.
11 */
12
13 #include <linux/sched.h>
14 #include <linux/sched/debug.h>
15 #include <linux/kernel.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/ptrace.h>
19 #include <linux/timer.h>
20 #include <linux/delay.h>
21 #include <linux/mm.h>
22 #include <linux/module.h>
23 #include <linux/smp.h>
24 #include <linux/spinlock.h>
25 #include <linux/init.h>
26 #include <linux/interrupt.h>
27 #include <linux/console.h>
28 #include <linux/bug.h>
29 #include <linux/ratelimit.h>
30 #include <linux/uaccess.h>
31
32 #include <asm/assembly.h>
33 #include <asm/io.h>
34 #include <asm/irq.h>
35 #include <asm/traps.h>
36 #include <asm/unaligned.h>
37 #include <linux/atomic.h>
38 #include <asm/smp.h>
39 #include <asm/pdc.h>
40 #include <asm/pdc_chassis.h>
41 #include <asm/unwind.h>
42 #include <asm/tlbflush.h>
43 #include <asm/cacheflush.h>
44
45 #include "../math-emu/math-emu.h" /* for handle_fpe() */
46
47 static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
48 struct pt_regs *regs);
49
/*
 * printbinary - render the low @nbits bits of @x as an ASCII bit string.
 * @buf:   destination buffer; must hold at least @nbits + 1 bytes.
 * @x:     value whose bits are printed, most-significant of the field first.
 * @nbits: number of bits to print.
 *
 * Returns the number of characters written (excluding the NUL terminator).
 * Used to pretty-print the PSW and FPSR bit fields in the register dumps.
 */
static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask;

	/* Shifting by a negative count or by >= the width of unsigned
	 * long is undefined behaviour in C; reject such widths outright
	 * instead of computing a bogus mask. */
	if (nbits <= 0 || nbits > (int)(8 * sizeof(unsigned long))) {
		*buf = '\0';
		return 0;
	}

	mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}
61
/* printk format for a general register: full native word width. */
#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

/* Dump four consecutive registers (x .. x+3) from array r on one line,
 * prefixed with log level lvl and register-name prefix f ("r", "sr", "fr"). */
#define PRINTREGS(lvl,r,f,fmt,x)	\
	printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n",	\
		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],	\
		(r)[(x)+2], (r)[(x)+3])
74 static void print_gr(char *level, struct pt_regs *regs)
75 {
76 int i;
77 char buf[64];
78
79 printk("%s\n", level);
80 printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
81 printbinary(buf, regs->gr[0], 32);
82 printk("%sPSW: %s %s\n", level, buf, print_tainted());
83
84 for (i = 0; i < 32; i += 4)
85 PRINTREGS(level, regs->gr, "r", RFMT, i);
86 }
87
/*
 * print_fr - dump FPSR/FPER1 (read live from %fr0 via inline asm) and the
 * 32 saved floating point registers at the given printk level.
 * Only meaningful for user-mode register sets (see show_regs()).
 */
static void print_fr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;	/* sw[0] = FPSR, sw[1] = FPER1 */

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1) \n\t"
		      "fldd 0(%1),%%fr0 \n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}
114
/*
 * show_regs - arch hook to dump a struct pt_regs.
 *
 * User-mode register sets are printed at KERN_DEBUG (best-effort
 * diagnostics), kernel-mode ones at KERN_CRIT.  For kernel faults the
 * instruction queue and return pointer are symbolized with %pS and a
 * backtrace is appended; for user faults raw addresses are printed and
 * the FP state is dumped as well.
 */
void show_regs(struct pt_regs *regs)
{
	int i, user;
	char *level;
	unsigned long cr30, cr31;

	user = user_mode(regs);
	level = user ? KERN_DEBUG : KERN_CRIT;

	show_regs_print_info(level);

	print_gr(level, regs);

	/* The eight space registers, four per line. */
	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	/* FP regs are only captured in the trap frame for user mode. */
	if (user)
		print_fr(level, regs);

	/* cr30 = per-cpu data pointer, cr31 = scratch — read live, not
	 * from the saved frame. */
	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
	       level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

	if (user) {
		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
	} else {
		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

		parisc_show_stack(current, NULL, regs);
	}
}
157
static DEFINE_RATELIMIT_STATE(_hppa_rs,
	DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

/*
 * Rate-limited diagnostic dump: print fmt plus a full register dump.
 * Non-critical messages are additionally gated on show_unhandled_signals.
 *
 * Wrapped in do { } while (0) so the macro behaves like a single
 * statement — a bare { } block followed by the caller's ';' breaks
 * inside an unbraced if/else chain.
 */
#define parisc_printk_ratelimited(critical, regs, fmt, ...)	\
do {									\
	if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) {	\
		printk(fmt, ##__VA_ARGS__);				\
		show_regs(regs);					\
	}								\
} while (0)
167
168
169 static void do_show_stack(struct unwind_frame_info *info)
170 {
171 int i = 1;
172
173 printk(KERN_CRIT "Backtrace:\n");
174 while (i <= 16) {
175 if (unwind_once(info) < 0 || info->ip == 0)
176 break;
177
178 if (__kernel_text_address(info->ip)) {
179 printk(KERN_CRIT " [<" RFMT ">] %pS\n",
180 info->ip, (void *) info->ip);
181 i++;
182 }
183 }
184 printk(KERN_CRIT "\n");
185 }
186
/*
 * parisc_show_stack - print a backtrace for @task.
 * @task: task to unwind, or NULL for current.
 * @sp:   unused here; kept for the generic show_stack() signature.
 * @regs: trap frame to start from, or NULL to synthesize one.
 *
 * Three start points: an explicit trap frame, a synthetic frame built
 * from the live %r30/IP of the current task, or the switch-frame of a
 * blocked task.
 */
static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs)
{
	struct unwind_frame_info info;
	struct task_struct *t;

	t = task ? task : current;
	if (regs) {
		unwind_frame_init(&info, t, regs);
		goto show_stack;
	}

	if (t == current) {
		unsigned long sp;

		/* Build a minimal pt_regs describing *this* point of
		 * execution: &&HERE as the IP (GCC computed label),
		 * our caller as RP, and the live stack pointer. */
HERE:
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		{
			struct pt_regs r;

			memset(&r, 0, sizeof(struct pt_regs));
			r.iaoq[0] = (unsigned long)&&HERE;
			r.gr[2] = (unsigned long)__builtin_return_address(0);
			r.gr[30] = sp;

			unwind_frame_init(&info, current, &r);
		}
	} else {
		unwind_frame_init_from_blocked_task(&info, t);
	}

show_stack:
	do_show_stack(&info);
}
221
222 void show_stack(struct task_struct *t, unsigned long *sp)
223 {
224 return parisc_show_stack(t, sp, NULL);
225 }
226
/* BUG() trap addresses are not range-checked on parisc: any address the
 * break-instruction path hands us is accepted by report_bug(). */
int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}
231
/*
 * die_if_kernel - oops path for faults that are fatal in kernel mode.
 * @str:  human-readable description of the trap.
 * @regs: trap frame.
 * @err:  trap code; 0 means "quiet" for user mode and suppresses the
 *        cow banner / message for kernel mode.
 *
 * For user mode this only (rate-limited) logs and returns — the caller
 * delivers the signal.  For kernel mode it prints the oops, taints the
 * kernel, panics if in interrupt context or panic_on_oops, and finally
 * kills the current task with do_exit(SIGSEGV).
 */
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		parisc_printk_ratelimited(1, regs,
			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);

		return;
	}

	oops_in_progress = 1;

	oops_enter();

	/* Amuse the user in a SPARC fashion */
	if (err) printk(KERN_CRIT
			" _______________________________ \n"
			" < Your System ate a SPARC! Gah! >\n"
			" ------------------------------- \n"
			" \\ ^__^\n"
			" (__)\\ )\\/\\\n"
			" U ||----w |\n"
			" || ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, task_pid_nr(current), str, err);

	/* Wot's wrong wif bein' racy? */
	/* Detect recursive oopses via a per-thread flag; spin forever
	 * (with IRQs on so the box stays pingable) instead of recursing. */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(SIGSEGV);
}
294
295 /* gdb uses break 4,8 */
296 #define GDB_BREAK_INSN 0x10004
297 static void handle_gdb_break(struct pt_regs *regs, int wot)
298 {
299 struct siginfo si;
300
301 si.si_signo = SIGTRAP;
302 si.si_errno = 0;
303 si.si_code = wot;
304 si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
305 force_sig_info(SIGTRAP, &si, current);
306 }
307
/*
 * handle_break - handle a break-instruction trap (interruption code 9).
 *
 * Kernel-mode BUG()/WARN() break instructions are routed to report_bug();
 * a WARN resumes at the next instruction by advancing both IAOQ slots.
 * Everything else (including gdb's "break 4,8") ends in a SIGTRAP to the
 * current task.
 */
static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here. */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3, regs);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON(). */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

	/* Log unexpected (non-gdb) break codes; the immediates are the
	 * 5-bit im5 and 13-bit im13 fields of the break instruction. */
	if (unlikely(iir != GDB_BREAK_INSN))
		parisc_printk_ratelimited(0, regs,
			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			task_pid_nr(current), current->comm);

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}
334
/* Fallback handler: identify the trap and CPU, dump registers. */
static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

/* LPMC (low-priority machine check) handler hook; platform code may
 * override it, otherwise default_trap() is used. */
void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
342
343
344 void transfer_pim_to_trap_frame(struct pt_regs *regs)
345 {
346 register int i;
347 extern unsigned int hpmc_pim_data[];
348 struct pdc_hpmc_pim_11 *pim_narrow;
349 struct pdc_hpmc_pim_20 *pim_wide;
350
351 if (boot_cpu_data.cpu_type >= pcxu) {
352
353 pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
354
355 /*
356 * Note: The following code will probably generate a
357 * bunch of truncation error warnings from the compiler.
358 * Could be handled with an ifdef, but perhaps there
359 * is a better way.
360 */
361
362 regs->gr[0] = pim_wide->cr[22];
363
364 for (i = 1; i < 32; i++)
365 regs->gr[i] = pim_wide->gr[i];
366
367 for (i = 0; i < 32; i++)
368 regs->fr[i] = pim_wide->fr[i];
369
370 for (i = 0; i < 8; i++)
371 regs->sr[i] = pim_wide->sr[i];
372
373 regs->iasq[0] = pim_wide->cr[17];
374 regs->iasq[1] = pim_wide->iasq_back;
375 regs->iaoq[0] = pim_wide->cr[18];
376 regs->iaoq[1] = pim_wide->iaoq_back;
377
378 regs->sar = pim_wide->cr[11];
379 regs->iir = pim_wide->cr[19];
380 regs->isr = pim_wide->cr[20];
381 regs->ior = pim_wide->cr[21];
382 }
383 else {
384 pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
385
386 regs->gr[0] = pim_narrow->cr[22];
387
388 for (i = 1; i < 32; i++)
389 regs->gr[i] = pim_narrow->gr[i];
390
391 for (i = 0; i < 32; i++)
392 regs->fr[i] = pim_narrow->fr[i];
393
394 for (i = 0; i < 8; i++)
395 regs->sr[i] = pim_narrow->sr[i];
396
397 regs->iasq[0] = pim_narrow->cr[17];
398 regs->iasq[1] = pim_narrow->iasq_back;
399 regs->iaoq[0] = pim_narrow->cr[18];
400 regs->iaoq[1] = pim_narrow->iaoq_back;
401
402 regs->sar = pim_narrow->cr[11];
403 regs->iir = pim_narrow->cr[19];
404 regs->isr = pim_narrow->cr[20];
405 regs->ior = pim_narrow->cr[21];
406 }
407
408 /*
409 * The following fields only have meaning if we came through
410 * another path. So just zero them here.
411 */
412
413 regs->ksp = 0;
414 regs->kpc = 0;
415 regs->orig_r28 = 0;
416 }
417
418
/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 *
 * @msg:    panic message.
 * @regs:   trap frame (rebuilt from PIM for HPMC, code == 1).
 * @code:   interruption number.
 * @offset: fault address to include in the report.
 *
 * Never returns: ends in panic(@msg).
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	oops_in_progress = 1;

	/* Mask external interrupts and serialize concurrent terminators. */
	set_eiem(0);
	local_irq_disable();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all paths will gutter the processor... */
	switch(code){

	case 1:
		/* HPMC: recover the register state from firmware PIM data. */
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		/* Fall through */
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info);
	}

	printk("\n");
	pr_crit("%s: Code=%d (%s) regs=%p (Addr=" RFMT ")\n",
		msg, code, trap_name(code), regs, offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Call kernel panic() so reboot timeouts work properly
	 * FIXME: This function should be on the list of
	 * panic notifiers, and we should call panic
	 * directly from the location that we wish.
	 * e.g. We should not call panic from
	 * parisc_terminate, but rather the oter way around.
	 * This hack works, prints the panic message twice,
	 * and it enables reboot timers!
	 */
	panic(msg);
}
484
/*
 * handle_interruption - common C entry point for PA-RISC traps.
 * @code: hardware interruption number (1..31).
 * @regs: register state saved by the assembly trap entry path.
 *
 * Dispatches on @code.  Some traps are fully handled (or turned into a
 * signal) and return directly; the TLB-miss / access-fault cases instead
 * set fault_address/fault_space and break out of the switch so that
 * do_page_fault() at the bottom resolves them.  Unrecoverable cases end
 * in parisc_terminate(), which does not return.
 */
void notrace handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	struct siginfo si;

	if (code == 1)
		pdc_console_restart(); /* switch back to pdc if HPMC */
	else
		local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the users
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
	if (((unsigned long)regs->iaoq[0] & 3) &&
	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
		regs->iaoq[0] = 0 | 3;
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;
		return;
	}

#if 0
	printk(KERN_CRIT "Interruption # %d\n", code);
#endif

	switch(code) {

	case 1:
		/* High-priority machine check (HPMC) */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case 2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case 3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case 5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		cpu_lpmc(5, regs);
		return;

	case 6:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space   = regs->iasq[0];
		break;

	case 8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si.si_code = ILL_ILLOPC;
		goto give_sigill;

	case 9:
		/* Break instruction trap */
		handle_break(regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si.si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */

			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			/* Step past the emulated instruction. */
			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si.si_code = ILL_PRVREG;
	give_sigill:
		si.si_signo = SIGILL;
		si.si_errno = 0;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGILL, &si, current);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		si.si_signo = SIGFPE;
		si.si_code = FPE_INTOVF;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGFPE, &si, current);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition  */
		if(user_mode(regs)){
			si.si_signo = SIGFPE;
			/* Set to zero, and let the userspace app figure it out from
			   the insn pointed to by si_addr */
			si.si_code = 0;
			si.si_addr = (void __user *) regs->iaoq[0];
			force_sig_info(SIGFPE, &si, current);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		__inc_irq_stat(irq_fpassist_count);
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		/* Fall through */
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		/* Fall through */
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* FIXME:
			 Still need to add slow path emulation code here!
			 If the insn used a non-shadow register, then the tlb
			 handlers could not have their side-effect (e.g. probe
			 writing to a target register) emulated since rfir would
			 erase the changes to said register. Instead we have to
			 setup everything, call this function we are in, and emulate
			 by hand. Technically we need to emulate:
			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
		*/
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later cpu's split this into types 26,27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		/* Fall Through */
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space   = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		/* fall thru */
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case 7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				up_read(&current->mm->mmap_sem);
				break; /* call do_page_fault() */
			}
			up_read(&current->mm->mmap_sem);
		}
		/* Fall Through */
	case 27:
		/* Data memory protection ID trap */
		if (code == 27 && !user_mode(regs) &&
			fixup_exception(regs))
			return;

		die_if_kernel("Protection id trap", regs, code);
		si.si_code = SEGV_MAPERR;
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		if (code == 7)
		    si.si_addr = (void __user *) regs->iaoq[0];
		else
		    si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
			parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"handle_interruption() pid=%d command='%s'\n",
				task_pid_nr(current), current->comm);
			/* SIGBUS, for lack of a better one. */
			si.si_signo = SIGBUS;
			si.si_code = BUS_OBJERR;
			si.si_errno = 0;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGBUS, &si, current);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	/* Fallen out of the switch: resolve as a page fault. */
	if (user_mode(regs)) {
	    /* A user fault in a space the task doesn't own is a plain
	     * SIGSEGV, not a page fault we should try to service. */
	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
		parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
				code, fault_space,
				task_pid_nr(current), current->comm);
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		si.si_code = SEGV_MAPERR;
		si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;
	    }
	}
	else {

	    /*
	     * The kernel should never fault on its own address space,
	     * unless pagefault_disable() was called before.
	     */

	    if (fault_space == 0 && !faulthandler_disabled())
	    {
		/* Clean up and return if in exception table. */
		if (fixup_exception(regs))
			return;
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
		parisc_terminate("Kernel Fault", regs, code, fault_address);
	    }
	}

	do_page_fault(regs, code, fault_address);
}
809
810
/*
 * initialize_ivt - prepare the interrupt vector table image at @iva.
 * @iva: the fault vector (fault_vector_11/20 from entry.S), identified
 *       by the "cows can fly" magic string at its start.
 *
 * Zeroes the first 8 words (vector 0 / the magic string), then patches
 * the HPMC vector (words 8..15): word 7 gets the handler length and
 * word 5 the negated checksum, so that the 8 vector words plus the
 * os_hpmc handler code sum to zero, as firmware requires for a valid
 * OS HPMC handler.
 */
void __init initialize_ivt(const void *iva)
{
	extern u32 os_hpmc_size;
	extern const u32 os_hpmc[];

	int i;
	u32 check = 0;
	u32 *ivap;
	u32 *hpmcp;
	u32 length;

	if (strcmp((const char *)iva, "cows can fly"))
		panic("IVT invalid");

	ivap = (u32 *)iva;

	/* Clear vector 0; ivap now points at the HPMC vector (word 8). */
	for (i = 0; i < 8; i++)
		*ivap++ = 0;

	/* Compute Checksum for HPMC handler */
	length = os_hpmc_size;
	ivap[7] = length;

	hpmcp = (u32 *)os_hpmc;

	for (i=0; i<length/4; i++)
		check += *hpmcp++;

	/* NOTE(review): ivap[5] (the checksum slot) is included in this
	 * sum and then overwritten below; this only yields a zero total
	 * if the asm vector pre-initializes that word to 0 — presumably
	 * entry.S does, but verify against the hpmc vector definition. */
	for (i=0; i<8; i++)
		check += ivap[i];

	ivap[5] = -check;
}
844
845
/* early_trap_init() is called before we set up kernel mappings and
 * write-protect the kernel */
void __init early_trap_init(void)
{
	extern const void fault_vector_20;

#ifndef CONFIG_64BIT
	/* 32-bit kernels carry a PA1.1 vector as well; patch both. */
	extern const void fault_vector_11;
	initialize_ivt(&fault_vector_11);
#endif

	initialize_ivt(&fault_vector_20);
}
859
/* Generic arch hook; all parisc trap setup happens in early_trap_init(),
 * so there is nothing left to do here. */
void __init trap_init(void)
{
}