parisc: fix interruption handler to respect pagefault_disable()
arch/parisc/kernel/traps.c
/*
 * linux/arch/parisc/traps.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>

#include <asm/assembly.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

#define PRINT_USER_FAULTS	/* (turn this on if you want user faults to be */
				/*  dumped to the console via printk) */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
        struct pt_regs *regs);

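/*
 * Render the low 'nbits' bits of 'x' as a string of '0'/'1' characters.
 * Used below to pretty-print the PSW and FPSR bit fields underneath the
 * column headers printed by print_gr() and print_fr().
 */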
static int printbinary(char *buf, unsigned long x, int nbits)
{
        unsigned long mask = 1UL << (nbits - 1);
        while (mask != 0) {
                *buf++ = (mask & x ? '1' : '0');
                mask >>= 1;
        }
        *buf = '\0';

        return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

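/*
 * Dump four consecutive registers of the array 'r' per line, labelled
 * f<x>-<x+3>, using format 'fmt' (RFMT for general and space registers,
 * FFMT for the 64-bit floating point registers).
 */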
#define PRINTREGS(lvl,r,f,fmt,x)	\
        printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n",	\
                lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
                (r)[(x)+2], (r)[(x)+3])

static void print_gr(char *level, struct pt_regs *regs)
{
        int i;
        char buf[64];

        printk("%s\n", level);
        printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
        printbinary(buf, regs->gr[0], 32);
        printk("%sPSW: %s %s\n", level, buf, print_tainted());

        for (i = 0; i < 32; i += 4)
                PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
        int i;
        char buf[64];
        struct { u32 sw[2]; } s;

        /* FR are 64bit everywhere. Need to use asm to get the content
         * of fpsr/fper1, and we assume that we won't have a FP Identify
         * in our way, otherwise we're screwed.
         * The fldd is used to restore the T-bit if there was one, as the
         * store clears it anyway.
         * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
        asm volatile ("fstd %%fr0,0(%1)	\n\t"
                      "fldd 0(%1),%%fr0	\n\t"
                      : "=m" (s) : "r" (&s) : "r0");

        printk("%s\n", level);
        printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
        printbinary(buf, s.sw[0], 32);
        printk("%sFPSR: %s\n", level, buf);
        printk("%sFPER1: %08x\n", level, s.sw[1]);

        /* here we'll print fr0 again, tho it'll be meaningless */
        for (i = 0; i < 32; i += 4)
                PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
        int i, user;
        char *level;
        unsigned long cr30, cr31;

        user = user_mode(regs);
        level = user ? KERN_DEBUG : KERN_CRIT;

        show_regs_print_info(level);

        print_gr(level, regs);

        for (i = 0; i < 8; i += 4)
                PRINTREGS(level, regs->sr, "sr", RFMT, i);

        if (user)
                print_fr(level, regs);

        cr30 = mfctl(30);
        cr31 = mfctl(31);
        printk("%s\n", level);
        printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
               level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
        printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
               level, regs->iir, regs->isr, regs->ior);
        printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
               level, current_thread_info()->cpu, cr30, cr31);
        printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

        if (user) {
                printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
                printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
                printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
        } else {
                printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
                printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
                printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

                parisc_show_stack(current, NULL, regs);
        }
}

static void do_show_stack(struct unwind_frame_info *info)
{
        int i = 1;

        printk(KERN_CRIT "Backtrace:\n");
        while (i <= 16) {
                if (unwind_once(info) < 0 || info->ip == 0)
                        break;

                if (__kernel_text_address(info->ip)) {
                        printk(KERN_CRIT " [<" RFMT ">] %pS\n",
                                info->ip, (void *) info->ip);
                        i++;
                }
        }
        printk(KERN_CRIT "\n");
}

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
        struct pt_regs *regs)
{
        struct unwind_frame_info info;
        struct task_struct *t;

        t = task ? task : current;
        if (regs) {
                unwind_frame_init(&info, t, regs);
                goto show_stack;
        }

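        /*
         * No pt_regs were supplied.  For the current task, fabricate a
         * minimal frame from the live stack pointer (r30), the address of
         * the HERE label and our return address, so the unwinder has a
         * valid starting point; for a blocked task, start from its saved
         * state instead.
         */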
        if (t == current) {
                unsigned long sp;

HERE:
                asm volatile ("copy %%r30, %0" : "=r"(sp));
                {
                        struct pt_regs r;

                        memset(&r, 0, sizeof(struct pt_regs));
                        r.iaoq[0] = (unsigned long)&&HERE;
                        r.gr[2] = (unsigned long)__builtin_return_address(0);
                        r.gr[30] = sp;

                        unwind_frame_init(&info, current, &r);
                }
        } else {
                unwind_frame_init_from_blocked_task(&info, t);
        }

show_stack:
        do_show_stack(&info);
}

void show_stack(struct task_struct *t, unsigned long *sp)
{
        return parisc_show_stack(t, sp, NULL);
}

int is_valid_bugaddr(unsigned long iaoq)
{
        return 1;
}

void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
        if (user_mode(regs)) {
                if (err == 0)
                        return; /* STFU */

                printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
                        current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
                /* XXX for debugging only */
                show_regs(regs);
#endif
                return;
        }

        oops_in_progress = 1;

        oops_enter();

        /* Amuse the user in a SPARC fashion */
        if (err) printk(KERN_CRIT
                        " _______________________________ \n"
                        " < Your System ate a SPARC! Gah! >\n"
                        " ------------------------------- \n"
                        "        \\   ^__^\n"
                        "         (__)\\       )\\/\\\n"
                        "          U  ||----w |\n"
                        "             ||     ||\n");

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        /* maybe the kernel hasn't booted very far yet and hasn't been able
         * to initialize the serial or STI console. In that case we should
         * re-enable the pdc console, so that the user will be able to
         * identify the problem. */
        if (!console_drivers)
                pdc_console_restart();

        if (err)
                printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
                        current->comm, task_pid_nr(current), str, err);

        /* Wot's wrong wif bein' racy? */
        if (current->thread.flags & PARISC_KERNEL_DEATH) {
                printk(KERN_CRIT "%s() recursion detected.\n", __func__);
                local_irq_enable();
                while (1);
        }
        current->thread.flags |= PARISC_KERNEL_DEATH;

        show_regs(regs);
        dump_stack();
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
                ssleep(5);
                panic("Fatal exception");
        }

        oops_exit();
        do_exit(SIGSEGV);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
        struct siginfo si;

        si.si_signo = SIGTRAP;
        si.si_errno = 0;
        si.si_code = wot;
        si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
        force_sig_info(SIGTRAP, &si, current);
}

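/*
 * Note on the break encodings used below: a PA-RISC break instruction
 * carries a 5-bit immediate in its low bits and a 13-bit immediate at
 * bit 13, which is what the "break %d,%d" message decodes and why gdb's
 * "break 4,8" corresponds to GDB_BREAK_INSN == 0x10004 above.  Kernel
 * BUG()/WARN() sites use the separate PARISC_BUG_BREAK_INSN encoding and
 * are routed through report_bug().
 */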
static void handle_break(struct pt_regs *regs)
{
        unsigned iir = regs->iir;

        if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
                /* check if a BUG() or WARN() trapped here. */
                enum bug_trap_type tt;
                tt = report_bug(regs->iaoq[0] & ~3, regs);
                if (tt == BUG_TRAP_TYPE_WARN) {
                        regs->iaoq[0] += 4;
                        regs->iaoq[1] += 4;
                        return; /* return to next instruction when WARN_ON(). */
                }
                die_if_kernel("Unknown kernel breakpoint", regs,
                        (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
        }

#ifdef PRINT_USER_FAULTS
        if (unlikely(iir != GDB_BREAK_INSN)) {
                printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
                        iir & 31, (iir>>13) & ((1<<13)-1),
                        task_pid_nr(current), current->comm);
                show_regs(regs);
        }
#endif

        /* send standard GDB signal */
        handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
        printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
        show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;


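/*
 * After an HPMC the firmware saves processor state in PIM (Processor
 * Internal Memory).  Copy that state out of hpmc_pim_data into a pt_regs
 * so the normal register/stack dump code can be reused; the wide (PA2.0)
 * or narrow (PA1.1) PIM layout is selected from the CPU type.
 */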
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
        register int i;
        extern unsigned int hpmc_pim_data[];
        struct pdc_hpmc_pim_11 *pim_narrow;
        struct pdc_hpmc_pim_20 *pim_wide;

        if (boot_cpu_data.cpu_type >= pcxu) {

                pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

                /*
                 * Note: The following code will probably generate a
                 * bunch of truncation error warnings from the compiler.
                 * Could be handled with an ifdef, but perhaps there
                 * is a better way.
                 */

                regs->gr[0] = pim_wide->cr[22];

                for (i = 1; i < 32; i++)
                        regs->gr[i] = pim_wide->gr[i];

                for (i = 0; i < 32; i++)
                        regs->fr[i] = pim_wide->fr[i];

                for (i = 0; i < 8; i++)
                        regs->sr[i] = pim_wide->sr[i];

                regs->iasq[0] = pim_wide->cr[17];
                regs->iasq[1] = pim_wide->iasq_back;
                regs->iaoq[0] = pim_wide->cr[18];
                regs->iaoq[1] = pim_wide->iaoq_back;

                regs->sar = pim_wide->cr[11];
                regs->iir = pim_wide->cr[19];
                regs->isr = pim_wide->cr[20];
                regs->ior = pim_wide->cr[21];
        }
        else {
                pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

                regs->gr[0] = pim_narrow->cr[22];

                for (i = 1; i < 32; i++)
                        regs->gr[i] = pim_narrow->gr[i];

                for (i = 0; i < 32; i++)
                        regs->fr[i] = pim_narrow->fr[i];

                for (i = 0; i < 8; i++)
                        regs->sr[i] = pim_narrow->sr[i];

                regs->iasq[0] = pim_narrow->cr[17];
                regs->iasq[1] = pim_narrow->iasq_back;
                regs->iaoq[0] = pim_narrow->cr[18];
                regs->iaoq[1] = pim_narrow->iaoq_back;

                regs->sar = pim_narrow->cr[11];
                regs->iir = pim_narrow->cr[19];
                regs->isr = pim_narrow->cr[20];
                regs->ior = pim_narrow->cr[21];
        }

        /*
         * The following fields only have meaning if we came through
         * another path. So just zero them here.
         */

        regs->ksp = 0;
        regs->kpc = 0;
        regs->orig_r28 = 0;
}


/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
        static DEFINE_SPINLOCK(terminate_lock);

        oops_in_progress = 1;

        set_eiem(0);
        local_irq_disable();
        spin_lock(&terminate_lock);

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        /* restart pdc console if necessary */
        if (!console_drivers)
                pdc_console_restart();

        /* Not all paths will gutter the processor... */
        switch(code){

        case 1:
                transfer_pim_to_trap_frame(regs);
                break;

        default:
                /* Fall through */
                break;

        }

        {
                /* show_stack(NULL, (unsigned long *)regs->gr[30]); */
                struct unwind_frame_info info;
                unwind_frame_init(&info, current, regs);
                do_show_stack(&info);
        }

        printk("\n");
        printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
                msg, code, regs, offset);
        show_regs(regs);

        spin_unlock(&terminate_lock);

        /* put soft power button back under hardware control;
         * if the user had pressed it once at any time, the
         * system will shut down immediately right here. */
        pdc_soft_power_button(0);

        /* Call kernel panic() so reboot timeouts work properly
         * FIXME: This function should be on the list of
         * panic notifiers, and we should call panic
         * directly from the location that we wish.
         * e.g. We should not call panic from
         * parisc_terminate, but rather the other way around.
         * This hack works, prints the panic message twice,
         * and it enables reboot timers!
         */
        panic(msg);
}

void notrace handle_interruption(int code, struct pt_regs *regs)
{
        unsigned long fault_address = 0;
        unsigned long fault_space = 0;
        struct siginfo si;

        if (code == 1)
                pdc_console_restart();  /* switch back to pdc if HPMC */
        else
                local_irq_enable();

        /* Security check:
         * If the priority level is still user, and the
         * faulting space is not equal to the active space
         * then the user is attempting something in a space
         * that does not belong to them. Kill the process.
         *
         * This is normally the situation when the user
         * attempts to jump into the kernel space at the
         * wrong offset, be it at the gateway page or a
         * random location.
         *
         * We cannot normally signal the process because it
         * could *be* on the gateway page, and processes
         * executing on the gateway page can't have signals
         * delivered.
         *
         * We merely readjust the address into the user's
         * space, at a destination address of zero, and
         * allow processing to continue.
         */
        if (((unsigned long)regs->iaoq[0] & 3) &&
            ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
                /* Kill the user process later */
                regs->iaoq[0] = 0 | 3;
                regs->iaoq[1] = regs->iaoq[0] + 4;
                regs->iasq[0] = regs->iasq[1] = regs->sr[7];
                regs->gr[0] &= ~PSW_B;
                return;
        }

#if 0
        printk(KERN_CRIT "Interruption # %d\n", code);
#endif

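        /*
         * The code values below follow the architected PA-RISC
         * interruption numbers (e.g. 6 = instruction TLB miss, 15 = data
         * TLB miss).  Cases that merely record fault_address/fault_space
         * fall out of the switch and end up in do_page_fault() below.
         */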
        switch(code) {

        case 1:
                /* High-priority machine check (HPMC) */

                /* set up a new led state on systems shipped with a LED State panel */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

                parisc_terminate("High Priority Machine Check (HPMC)",
                                regs, code, 0);
                /* NOT REACHED */

        case 2:
                /* Power failure interrupt */
                printk(KERN_CRIT "Power failure interrupt !\n");
                return;

        case 3:
                /* Recovery counter trap */
                regs->gr[0] &= ~PSW_R;
                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_TRACE);
                /* else this must be the start of a syscall - just let it run */
                return;

        case 5:
                /* Low-priority machine check */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

                flush_cache_all();
                flush_tlb_all();
                cpu_lpmc(5, regs);
                return;

        case 6:
                /* Instruction TLB miss fault/Instruction page fault */
                fault_address = regs->iaoq[0];
                fault_space = regs->iasq[0];
                break;

        case 8:
                /* Illegal instruction trap */
                die_if_kernel("Illegal instruction", regs, code);
                si.si_code = ILL_ILLOPC;
                goto give_sigill;

        case 9:
                /* Break instruction trap */
                handle_break(regs);
                return;

        case 10:
                /* Privileged operation trap */
                die_if_kernel("Privileged operation", regs, code);
                si.si_code = ILL_PRVOPC;
                goto give_sigill;

        case 11:
                /* Privileged register trap */
                if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

                        /* This is a MFCTL cr26/cr27 to gr instruction.
                         * PCXS traps on this, so we need to emulate it.
                         */

                        if (regs->iir & 0x00200000)
                                regs->gr[regs->iir & 0x1f] = mfctl(27);
                        else
                                regs->gr[regs->iir & 0x1f] = mfctl(26);

                        regs->iaoq[0] = regs->iaoq[1];
                        regs->iaoq[1] += 4;
                        regs->iasq[0] = regs->iasq[1];
                        return;
                }

                die_if_kernel("Privileged register usage", regs, code);
                si.si_code = ILL_PRVREG;
        give_sigill:
                si.si_signo = SIGILL;
                si.si_errno = 0;
                si.si_addr = (void __user *) regs->iaoq[0];
                force_sig_info(SIGILL, &si, current);
                return;

        case 12:
                /* Overflow Trap, let the userland signal handler do the cleanup */
                si.si_signo = SIGFPE;
                si.si_code = FPE_INTOVF;
                si.si_addr = (void __user *) regs->iaoq[0];
                force_sig_info(SIGFPE, &si, current);
                return;

        case 13:
                /* Conditional Trap
                   The condition succeeds in an instruction which traps
                   on condition */
                if(user_mode(regs)){
                        si.si_signo = SIGFPE;
                        /* Set to zero, and let the userspace app figure it out from
                           the insn pointed to by si_addr */
                        si.si_code = 0;
                        si.si_addr = (void __user *) regs->iaoq[0];
                        force_sig_info(SIGFPE, &si, current);
                        return;
                }
                /* The kernel doesn't want to handle condition codes */
                break;

        case 14:
                /* Assist Exception Trap, i.e. floating point exception. */
                die_if_kernel("Floating point exception", regs, 0); /* quiet */
                __inc_irq_stat(irq_fpassist_count);
                handle_fpe(regs);
                return;

        case 15:
                /* Data TLB miss fault/Data page fault */
                /* Fall through */
        case 16:
                /* Non-access instruction TLB miss fault */
                /* The instruction TLB entry needed for the target address of the FIC
                   is absent, and hardware can't find it, so we get to cleanup */
                /* Fall through */
        case 17:
                /* Non-access data TLB miss fault/Non-access data page fault */
                /* FIXME:
                         Still need to add slow path emulation code here!
                         If the insn used a non-shadow register, then the tlb
                         handlers could not have their side-effect (e.g. probe
                         writing to a target register) emulated since rfir would
                         erase the changes to said register. Instead we have to
                         setup everything, call this function we are in, and emulate
                         by hand. Technically we need to emulate:
                         fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
                */
                fault_address = regs->ior;
                fault_space = regs->isr;
                break;

        case 18:
                /* PCXS only -- later cpu's split this into types 26,27 & 28 */
                /* Check for unaligned access */
                if (check_unaligned(regs)) {
                        handle_unaligned(regs);
                        return;
                }
                /* Fall Through */
        case 26:
                /* PCXL: Data memory access rights trap */
                fault_address = regs->ior;
                fault_space = regs->isr;
                break;

        case 19:
                /* Data memory break trap */
                regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
                /* fall thru */
        case 21:
                /* Page reference trap */
                handle_gdb_break(regs, TRAP_HWBKPT);
                return;

        case 25:
                /* Taken branch trap */
                regs->gr[0] &= ~PSW_T;
                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_BRANCH);
                /* else this must be the start of a syscall - just let it
                 * run.
                 */
                return;

        case 7:
                /* Instruction access rights */
                /* PCXL: Instruction memory protection trap */

                /*
                 * This could be caused by either: 1) a process attempting
                 * to execute within a vma that does not have execute
                 * permission, or 2) an access rights violation caused by a
                 * flush only translation set up by ptep_get_and_clear().
                 * So we check the vma permissions to differentiate the two.
                 * If the vma indicates we have execute permission, then
                 * the cause is the latter one. In this case, we need to
                 * call do_page_fault() to fix the problem.
                 */

                if (user_mode(regs)) {
                        struct vm_area_struct *vma;

                        down_read(&current->mm->mmap_sem);
                        vma = find_vma(current->mm,regs->iaoq[0]);
                        if (vma && (regs->iaoq[0] >= vma->vm_start)
                                && (vma->vm_flags & VM_EXEC)) {

                                fault_address = regs->iaoq[0];
                                fault_space = regs->iasq[0];

                                up_read(&current->mm->mmap_sem);
                                break; /* call do_page_fault() */
                        }
                        up_read(&current->mm->mmap_sem);
                }
                /* Fall Through */
        case 27:
                /* Data memory protection ID trap */
                if (code == 27 && !user_mode(regs) &&
                        fixup_exception(regs))
                        return;

                die_if_kernel("Protection id trap", regs, code);
                si.si_code = SEGV_MAPERR;
                si.si_signo = SIGSEGV;
                si.si_errno = 0;
                if (code == 7)
                        si.si_addr = (void __user *) regs->iaoq[0];
                else
                        si.si_addr = (void __user *) regs->ior;
                force_sig_info(SIGSEGV, &si, current);
                return;

        case 28:
                /* Unaligned data reference trap */
                handle_unaligned(regs);
                return;

        default:
                if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
                        printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
                            task_pid_nr(current), current->comm);
                        show_regs(regs);
#endif
                        /* SIGBUS, for lack of a better one. */
                        si.si_signo = SIGBUS;
                        si.si_code = BUS_OBJERR;
                        si.si_errno = 0;
                        si.si_addr = (void __user *) regs->ior;
                        force_sig_info(SIGBUS, &si, current);
                        return;
                }
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

                parisc_terminate("Unexpected interruption", regs, code, 0);
                /* NOT REACHED */
        }

        if (user_mode(regs)) {
                if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
#ifdef PRINT_USER_FAULTS
                        if (fault_space == 0)
                                printk(KERN_DEBUG "User Fault on Kernel Space ");
                        else
                                printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
                                       code);
                        printk(KERN_CONT "pid=%d command='%s'\n",
                               task_pid_nr(current), current->comm);
                        show_regs(regs);
#endif
                        si.si_signo = SIGSEGV;
                        si.si_errno = 0;
                        si.si_code = SEGV_MAPERR;
                        si.si_addr = (void __user *) regs->ior;
                        force_sig_info(SIGSEGV, &si, current);
                        return;
                }
        }
        else {

                /*
                 * The kernel should never fault on its own address space,
                 * unless pagefault_disable() was called before.
                 */

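                /*
                 * (Assumed intent, per the pagefault_disable() fix in the
                 * commit subject:) if pagefault_disable() was in effect,
                 * in_atomic() is true here, so we skip parisc_terminate()
                 * and let do_page_fault() resolve the fault through the
                 * exception fixup tables instead.
                 */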
                if (fault_space == 0 && !in_atomic())
                {
                        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
                        parisc_terminate("Kernel Fault", regs, code, fault_address);
                }
        }

        do_page_fault(regs, code, fault_address);
}


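/*
 * check_ivt() verifies the "cows can fly" magic string that the fault
 * vector code places at the IVA, clears the first eight vector words,
 * records the HPMC handler length and stores a negated checksum so the
 * covered words sum to zero (assumed here to be the checksum convention
 * PDC firmware expects for the HPMC handler).
 */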
int __init check_ivt(void *iva)
{
        extern u32 os_hpmc_size;
        extern const u32 os_hpmc[];

        int i;
        u32 check = 0;
        u32 *ivap;
        u32 *hpmcp;
        u32 length;

        if (strcmp((char *)iva, "cows can fly"))
                return -1;

        ivap = (u32 *)iva;

        for (i = 0; i < 8; i++)
                *ivap++ = 0;

        /* Compute Checksum for HPMC handler */
        length = os_hpmc_size;
        ivap[7] = length;

        hpmcp = (u32 *)os_hpmc;

        for (i=0; i<length/4; i++)
                check += *hpmcp++;

        for (i=0; i<8; i++)
                check += ivap[i];

        ivap[5] = -check;

        return 0;
}

#ifndef CONFIG_64BIT
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;

void __init trap_init(void)
{
        void *iva;

        if (boot_cpu_data.cpu_type >= pcxu)
                iva = (void *) &fault_vector_20;
        else
#ifdef CONFIG_64BIT
                panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
                iva = (void *) &fault_vector_11;
#endif

        if (check_ivt(iva))
                panic("IVT invalid");
}