/*
 * linux/arch/parisc/traps.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/kallsyms.h>

#include <asm/assembly.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
			  /* dumped to the console via printk) */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif

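/* Render the low 'nbits' bits of x, most significant bit first, as an
 * ASCII string of '0'/'1' characters into buf; buf must have room for
 * nbits + 1 bytes.  Returns nbits. */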
int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}

#ifdef __LP64__
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

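/* Print four consecutive registers on one line: lvl is the printk
 * level, r the register array, f the register-file prefix ("r", "sr"
 * or "fr"), fmt the per-register format and x the first index. */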
#define PRINTREGS(lvl,r,f,fmt,x)	\
	printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n",	\
	       lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
	       (r)[(x)+2], (r)[(x)+3])

static void print_gr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];

	printk("%s\n", level);
	printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}

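/* Dump the live FP status register and FPER1 (read from %fr0) followed
 * by the 32 floating-point registers saved in the trap frame. */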
static void print_fr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1)	\n\t"
		      "fldd 0(%1),%%fr0	\n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
	int i;
	char *level;
	unsigned long cr30, cr31;

	level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user_mode(regs))
		print_fr(level, regs);

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
	       level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
	printk(level);
	print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
	printk(level);
	print_symbol(" IAOQ[1]: %s\n", regs->iaoq[1]);
	printk(level);
	print_symbol(" RP(r2): %s\n", regs->gr[2]);
}


void dump_stack(void)
{
	show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);

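/* Walk at most 16 unwind frames from *info and print the return
 * addresses that fall inside kernel text. */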
static void do_show_stack(struct unwind_frame_info *info)
{
	int i = 1;

	printk("Backtrace:\n");
	while (i <= 16) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;

		if (__kernel_text_address(info->ip)) {
			printk(" [<" RFMT ">] ", info->ip);
#ifdef CONFIG_KALLSYMS
			print_symbol("%s\n", info->ip);
#else
			if ((i & 0x03) == 0)
				printk("\n");
#endif
			i++;
		}
	}
	printk("\n");
}

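/* Show a backtrace for 'task', or for the current context when task is
 * NULL.  In the NULL case a pt_regs is built on our own stack from the
 * current stack pointer, return address and the HERE label, so no
 * memory allocation is needed to start the unwind. */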
void show_stack(struct task_struct *task, unsigned long *s)
{
	struct unwind_frame_info info;

	if (!task) {
		unsigned long sp;

HERE:
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		{
			struct pt_regs r;

			memset(&r, 0, sizeof(struct pt_regs));
			r.iaoq[0] = (unsigned long)&&HERE;
			r.gr[2] = (unsigned long)__builtin_return_address(0);
			r.gr[30] = sp;

			unwind_frame_init(&info, current, &r);
		}
	} else {
		unwind_frame_init_from_blocked_task(&info, task);
	}

	do_show_stack(&info);
}

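/* Report a fault.  For user mode this (optionally) logs the fault and
 * returns; for kernel mode it prints an oops, panics if we are in
 * interrupt context or panic_on_oops is set, and otherwise kills the
 * current process with SIGSEGV. */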
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, current->pid, str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
		/* XXX for debugging only */
		show_regs(regs);
#endif
		return;
	}

	oops_in_progress = 1;

	/* Amuse the user in a SPARC fashion */
	printk(
"           _______________________________ \n"
"          < Your System ate a SPARC! Gah! >\n"
"           ------------------------------- \n"
"                   \\   ^__^\n"
"                    \\  (xx)\\_______\n"
"                       (__)\\       )\\/\\\n"
"                        U  ||----w |\n"
"                           ||     ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
		current->comm, current->pid, str, err);
	show_regs(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
		local_irq_enable();
		while (1);
	}

	current->thread.flags |= PARISC_KERNEL_DEATH;
	do_exit(SIGSEGV);
}

int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
{
	return syscall(regs);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
void handle_gdb_break(struct pt_regs *regs, int wot)
{
	struct siginfo si;

	si.si_code = wot;
	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
	si.si_signo = SIGTRAP;
	si.si_errno = 0;
	force_sig_info(SIGTRAP, &si, current);
}

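/* Break instruction trap.  iir is the trapping break instruction:
 * 0 is a plain user breakpoint, GDB_BREAK_INSN ("break 4,8") comes
 * from gdb; anything else is logged and also signalled as SIGTRAP. */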
void handle_break(unsigned iir, struct pt_regs *regs)
{
	struct siginfo si;

	switch(iir) {
	case 0x00:
#ifdef PRINT_USER_FAULTS
		printk(KERN_DEBUG "break 0,0: pid=%d command='%s'\n",
		       current->pid, current->comm);
#endif
		die_if_kernel("Breakpoint", regs, 0);
#ifdef PRINT_USER_FAULTS
		show_regs(regs);
#endif
		si.si_code = TRAP_BRKPT;
		si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
		si.si_signo = SIGTRAP;
		force_sig_info(SIGTRAP, &si, current);
		break;

	case GDB_BREAK_INSN:
		die_if_kernel("Breakpoint", regs, 0);
		handle_gdb_break(regs, TRAP_BRKPT);
		break;

	default:
#ifdef PRINT_USER_FAULTS
		printk(KERN_DEBUG "break %#08x: pid=%d command='%s'\n",
		       iir, current->pid, current->comm);
		show_regs(regs);
#endif
		si.si_signo = SIGTRAP;
		si.si_code = TRAP_BRKPT;
		si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
		force_sig_info(SIGTRAP, &si, current);
		return;
	}
}


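/* TOC (transfer of control) entry point: just log it for now. */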
int handle_toc(void)
{
	printk(KERN_CRIT "TOC call.\n");
	return 0;
}

static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) = default_trap;


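/* After an HPMC the firmware saves the machine state to PIM (Processor
 * Internal Memory).  Copy that image from hpmc_pim_data into a pt_regs
 * so the normal register dump and unwinder can be used; the wide or
 * narrow PIM layout is selected by CPU type. */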
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
	register int i;
	extern unsigned int hpmc_pim_data[];
	struct pdc_hpmc_pim_11 *pim_narrow;
	struct pdc_hpmc_pim_20 *pim_wide;

	if (boot_cpu_data.cpu_type >= pcxu) {

		pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

		/*
		 * Note: The following code will probably generate a
		 * bunch of truncation error warnings from the compiler.
		 * Could be handled with an ifdef, but perhaps there
		 * is a better way.
		 */

		regs->gr[0] = pim_wide->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_wide->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_wide->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_wide->sr[i];

		regs->iasq[0] = pim_wide->cr[17];
		regs->iasq[1] = pim_wide->iasq_back;
		regs->iaoq[0] = pim_wide->cr[18];
		regs->iaoq[1] = pim_wide->iaoq_back;

		regs->sar = pim_wide->cr[11];
		regs->iir = pim_wide->cr[19];
		regs->isr = pim_wide->cr[20];
		regs->ior = pim_wide->cr[21];
	}
	else {
		pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

		regs->gr[0] = pim_narrow->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_narrow->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_narrow->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_narrow->sr[i];

		regs->iasq[0] = pim_narrow->cr[17];
		regs->iasq[1] = pim_narrow->iasq_back;
		regs->iaoq[0] = pim_narrow->cr[18];
		regs->iaoq[1] = pim_narrow->iaoq_back;

		regs->sar = pim_narrow->cr[11];
		regs->iir = pim_narrow->cr[19];
		regs->isr = pim_narrow->cr[20];
		regs->ior = pim_narrow->cr[21];
	}

	/*
	 * The following fields only have meaning if we came through
	 * another path. So just zero them here.
	 */

	regs->ksp = 0;
	regs->kpc = 0;
	regs->orig_r28 = 0;
}


/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	oops_in_progress = 1;

	set_eiem(0);
	local_irq_disable();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all paths will gutter the processor... */
	switch(code){

	case 1:
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		/* Fall through */
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info);
	}

	printk("\n");
	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
	       msg, code, regs, offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Call kernel panic() so reboot timeouts work properly
	 * FIXME: This function should be on the list of
	 * panic notifiers, and we should call panic
	 * directly from the location that we wish.
	 * e.g. We should not call panic from
	 * parisc_terminate, but rather the other way around.
	 * This hack works, prints the panic message twice,
	 * and it enables reboot timers!
	 */
	panic(msg);
}

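/* Main trap dispatcher, entered from the low-level interruption code
 * with the trap number in 'code'.  Traps that can be handled completely
 * here return directly; TLB miss and access-rights traps fall through
 * to do_page_fault() at the bottom of the function. */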
void handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	struct siginfo si;

	if (code == 1)
		pdc_console_restart();  /* switch back to pdc if HPMC */
	else
		local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the user's
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
	if (((unsigned long)regs->iaoq[0] & 3) &&
	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
		regs->iaoq[0] = 0 | 3;
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;
		return;
	}

#if 0
	printk(KERN_CRIT "Interruption # %d\n", code);
#endif

	switch(code) {

	case 1:
		/* High-priority machine check (HPMC) */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case 2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case 3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case 5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		cpu_lpmc(5, regs);
		return;

	case 6:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space = regs->iasq[0];
		break;

	case 8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si.si_code = ILL_ILLOPC;
		goto give_sigill;

	case 9:
		/* Break instruction trap */
		handle_break(regs->iir,regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si.si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */

			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si.si_code = ILL_PRVREG;
	give_sigill:
		si.si_signo = SIGILL;
		si.si_errno = 0;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGILL, &si, current);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		si.si_signo = SIGFPE;
		si.si_code = FPE_INTOVF;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGFPE, &si, current);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition */
		if (user_mode(regs)) {
			si.si_signo = SIGFPE;
			/* Set to zero, and let the userspace app figure it out from
			   the insn pointed to by si_addr */
			si.si_code = 0;
			si.si_addr = (void __user *) regs->iaoq[0];
			force_sig_info(SIGFPE, &si, current);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		/* Fall through */
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		/* Fall through */
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* FIXME:
			 Still need to add slow path emulation code here!
			 If the insn used a non-shadow register, then the tlb
			 handlers could not have their side-effect (e.g. probe
			 writing to a target register) emulated since rfir would
			 erase the changes to said register. Instead we have to
			 setup everything, call this function we are in, and emulate
			 by hand. Technically we need to emulate:
			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
		*/
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later cpu's split this into types 26,27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		/* Fall Through */
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		/* fall thru */
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case 7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				up_read(&current->mm->mmap_sem);
				break; /* call do_page_fault() */
			}
			up_read(&current->mm->mmap_sem);
		}
		/* Fall Through */
	case 27:
		/* Data memory protection ID trap */
		die_if_kernel("Protection id trap", regs, code);
		si.si_code = SEGV_MAPERR;
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		if (code == 7)
			si.si_addr = (void __user *) regs->iaoq[0];
		else
			si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
			printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
			       current->pid, current->comm);
			show_regs(regs);
#endif
			/* SIGBUS, for lack of a better one. */
			si.si_signo = SIGBUS;
			si.si_code = BUS_OBJERR;
			si.si_errno = 0;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGBUS, &si, current);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	if (user_mode(regs)) {
		if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
#ifdef PRINT_USER_FAULTS
			if (fault_space == 0)
				printk(KERN_DEBUG "User Fault on Kernel Space ");
			else
				printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
				       code);
			printk("pid=%d command='%s'\n", current->pid, current->comm);
			show_regs(regs);
#endif
			si.si_signo = SIGSEGV;
			si.si_errno = 0;
			si.si_code = SEGV_MAPERR;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGSEGV, &si, current);
			return;
		}
	}
	else {

		/*
		 * The kernel should never fault on its own address space.
		 */

		if (fault_space == 0) {
			pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
			parisc_terminate("Kernel Fault", regs, code, fault_address);
		}
	}

	do_page_fault(regs, code, fault_address);
}


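/* Finalize the HPMC entry of the interruption vector table at 'iva':
 * verify the "cows can fly" marker, clear the first eight words, record
 * the os_hpmc handler length in word 7 and store a checksum in word 5
 * so that the 32-bit sum over the entry and the handler is zero
 * (presumably what firmware checks before branching to os_hpmc). */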
int __init check_ivt(void *iva)
{
	int i;
	u32 check = 0;
	u32 *ivap;
	u32 *hpmcp;
	u32 length;
	extern void os_hpmc(void);
	extern void os_hpmc_end(void);

	if (strcmp((char *)iva, "cows can fly"))
		return -1;

	ivap = (u32 *)iva;

	for (i = 0; i < 8; i++)
		*ivap++ = 0;

	/* Compute Checksum for HPMC handler */

	length = (u32)((unsigned long)os_hpmc_end - (unsigned long)os_hpmc);
	ivap[7] = length;

	hpmcp = (u32 *)os_hpmc;

	for (i=0; i<length/4; i++)
		check += *hpmcp++;

	for (i=0; i<8; i++)
		check += ivap[i];

	ivap[5] = -check;

	return 0;
}

#ifndef __LP64__
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;

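/* Select the fault vector matching this CPU family and let check_ivt()
 * finish setting it up. */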
void __init trap_init(void)
{
	void *iva;

	if (boot_cpu_data.cpu_type >= pcxu)
		iva = (void *) &fault_vector_20;
	else
#ifdef __LP64__
		panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
		iva = (void *) &fault_vector_11;
#endif

	if (check_ivt(iva))
		panic("IVT invalid");
}