/*
 * linux/arch/parisc/traps.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/kallsyms.h>
#include <linux/bug.h>

#include <asm/assembly.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
			  /* dumped to the console via printk) */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif

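/*
 * Render the low 'nbits' bits of 'x' into 'buf' as ASCII '0'/'1',
 * most significant bit first.  Used below to dump the PSW against
 * the bit-name legend printed just above it.
 */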
static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

#define PRINTREGS(lvl,r,f,fmt,x)	\
	printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n",	\
		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
		(r)[(x)+2], (r)[(x)+3])

static void print_gr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];

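	/*
	 * In the saved trap frame gr[0] holds the PSW (the trap handlers
	 * below manipulate PSW_B/PSW_R/PSW_T/PSW_X through regs->gr[0]);
	 * the legend line names the 32 PSW bits shown in the binary dump.
	 */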
	printk("%s\n", level);
	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1) \n\t"
		      "fldd 0(%1),%%fr0 \n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
	int i;
	char *level;
	unsigned long cr30, cr31;

	level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user_mode(regs))
		print_fr(level, regs);

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
	       level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
	printk(level);
	print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
	printk(level);
	print_symbol(" IAOQ[1]: %s\n", regs->iaoq[1]);
	printk(level);
	print_symbol(" RP(r2): %s\n", regs->gr[2]);
}


void dump_stack(void)
{
	show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);

static void do_show_stack(struct unwind_frame_info *info)
{
	int i = 1;

	printk(KERN_CRIT "Backtrace:\n");
	while (i <= 16) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;

		if (__kernel_text_address(info->ip)) {
			printk("%s [<" RFMT ">] ", (i&0x3)==1 ? KERN_CRIT : "", info->ip);
#ifdef CONFIG_KALLSYMS
			print_symbol("%s\n", info->ip);
#else
			if ((i & 0x03) == 0)
				printk("\n");
#endif
			i++;
		}
	}
	printk("\n");
}

void show_stack(struct task_struct *task, unsigned long *s)
{
	struct unwind_frame_info info;

	if (!task) {
		unsigned long sp;

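		/*
		 * No task given: unwind from right here.  Build a minimal
		 * pt_regs describing this point (current IAOQ via the
		 * computed label, return pointer and stack pointer) so the
		 * unwinder has a frame to start from.
		 */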
HERE:
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		{
			struct pt_regs r;

			memset(&r, 0, sizeof(struct pt_regs));
			r.iaoq[0] = (unsigned long)&&HERE;
			r.gr[2] = (unsigned long)__builtin_return_address(0);
			r.gr[30] = sp;

			unwind_frame_init(&info, current, &r);
		}
	} else {
		unwind_frame_init_from_blocked_task(&info, task);
	}

	do_show_stack(&info);
}

int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}

void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, current->pid, str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
		/* XXX for debugging only */
		show_regs(regs);
#endif
		return;
	}

	oops_in_progress = 1;

	/* Amuse the user in a SPARC fashion */
	if (err) printk(
KERN_CRIT "      _______________________________ \n"
KERN_CRIT "     < Your System ate a SPARC! Gah! >\n"
KERN_CRIT "      ------------------------------- \n"
KERN_CRIT "             \\   ^__^\n"
KERN_CRIT "              \\  (xx)\\_______\n"
KERN_CRIT "                 (__)\\       )\\/\\\n"
KERN_CRIT "                  U  ||----w |\n"
KERN_CRIT "                     ||     ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, current->pid, str, err);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	do_exit(SIGSEGV);
}

int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
{
	return syscall(regs);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
	struct siginfo si;

	si.si_signo = SIGTRAP;
	si.si_errno = 0;
	si.si_code = wot;
	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
	force_sig_info(SIGTRAP, &si, current);
}

static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here. */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON(). */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

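	/*
	 * Any other break is reported, unless it is gdb's own breakpoint
	 * instruction.  The operands of "break im5,im13" are recovered
	 * from the IIR: im5 sits in bits 0..4, im13 in bits 13..25.
	 */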
#ifdef PRINT_USER_FAULTS
	if (unlikely(iir != GDB_BREAK_INSN)) {
		printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			current->pid, current->comm);
		show_regs(regs);
	}
#endif

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;


void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
	register int i;
	extern unsigned int hpmc_pim_data[];
	struct pdc_hpmc_pim_11 *pim_narrow;
	struct pdc_hpmc_pim_20 *pim_wide;

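	/*
	 * PCX-U and later CPUs are PA 2.0 and log the wide (64-bit) PIM
	 * layout; everything older uses the narrow PA 1.1 format.
	 */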
	if (boot_cpu_data.cpu_type >= pcxu) {

		pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

		/*
		 * Note: The following code will probably generate a
		 * bunch of truncation error warnings from the compiler.
		 * Could be handled with an ifdef, but perhaps there
		 * is a better way.
		 */

		regs->gr[0] = pim_wide->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_wide->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_wide->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_wide->sr[i];

		regs->iasq[0] = pim_wide->cr[17];
		regs->iasq[1] = pim_wide->iasq_back;
		regs->iaoq[0] = pim_wide->cr[18];
		regs->iaoq[1] = pim_wide->iaoq_back;

		regs->sar = pim_wide->cr[11];
		regs->iir = pim_wide->cr[19];
		regs->isr = pim_wide->cr[20];
		regs->ior = pim_wide->cr[21];
	}
	else {
		pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

		regs->gr[0] = pim_narrow->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_narrow->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_narrow->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_narrow->sr[i];

		regs->iasq[0] = pim_narrow->cr[17];
		regs->iasq[1] = pim_narrow->iasq_back;
		regs->iaoq[0] = pim_narrow->cr[18];
		regs->iaoq[1] = pim_narrow->iaoq_back;

		regs->sar = pim_narrow->cr[11];
		regs->iir = pim_narrow->cr[19];
		regs->isr = pim_narrow->cr[20];
		regs->ior = pim_narrow->cr[21];
	}

	/*
	 * The following fields only have meaning if we came through
	 * another path. So just zero them here.
	 */

	regs->ksp = 0;
	regs->kpc = 0;
	regs->orig_r28 = 0;
}


/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	oops_in_progress = 1;

	set_eiem(0);
	local_irq_disable();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all paths will gutter the processor... */
	switch(code){

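	/*
	 * For an HPMC (code 1) the register state lives in the PIM data
	 * saved by the HPMC handler, not in a normal trap frame; copy it
	 * into 'regs' so the dump below shows something useful.
	 */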
	case 1:
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		/* Fall through */
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info);
	}

	printk("\n");
	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
			msg, code, regs, offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Call kernel panic() so reboot timeouts work properly
	 * FIXME: This function should be on the list of
	 * panic notifiers, and we should call panic
	 * directly from the location that we wish.
	 * e.g. We should not call panic from
	 * parisc_terminate, but rather the other way around.
	 * This hack works, prints the panic message twice,
	 * and it enables reboot timers!
	 */
	panic(msg);
}

void handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	struct siginfo si;

	if (code == 1)
		pdc_console_restart();  /* switch back to pdc if HPMC */
	else
		local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the users
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
	if (((unsigned long)regs->iaoq[0] & 3) &&
	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
		regs->iaoq[0] = 0 | 3;
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;
		return;
	}

#if 0
	printk(KERN_CRIT "Interruption # %d\n", code);
#endif

	switch(code) {

	case 1:
		/* High-priority machine check (HPMC) */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case 2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case 3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case 5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		cpu_lpmc(5, regs);
		return;

	case 6:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space = regs->iasq[0];
		break;

	case 8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si.si_code = ILL_ILLOPC;
		goto give_sigill;

	case 9:
		/* Break instruction trap */
		handle_break(regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si.si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */

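			/*
			 * Bit 21 of the IIR selects cr27 vs cr26, and the
			 * low five bits name the destination GR.  After
			 * emulating, step the IA queues past the trapping
			 * instruction.
			 */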
			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si.si_code = ILL_PRVREG;
	give_sigill:
		si.si_signo = SIGILL;
		si.si_errno = 0;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGILL, &si, current);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		si.si_signo = SIGFPE;
		si.si_code = FPE_INTOVF;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGFPE, &si, current);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition */
		if(user_mode(regs)){
			si.si_signo = SIGFPE;
			/* Set to zero, and let the userspace app figure it out from
			   the insn pointed to by si_addr */
			si.si_code = 0;
			si.si_addr = (void __user *) regs->iaoq[0];
			force_sig_info(SIGFPE, &si, current);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		/* Fall through */
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		/* Fall through */
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* FIXME:
			 Still need to add slow path emulation code here!
			 If the insn used a non-shadow register, then the tlb
			 handlers could not have their side-effect (e.g. probe
			 writing to a target register) emulated since rfir would
			 erase the changes to said register. Instead we have to
			 setup everything, call this function we are in, and emulate
			 by hand. Technically we need to emulate:
			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
		*/
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later cpu's split this into types 26,27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		/* Fall Through */
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		/* fall thru */
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case 7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				up_read(&current->mm->mmap_sem);
				break; /* call do_page_fault() */
			}
			up_read(&current->mm->mmap_sem);
		}
		/* Fall Through */
	case 27:
		/* Data memory protection ID trap */
		die_if_kernel("Protection id trap", regs, code);
		si.si_code = SEGV_MAPERR;
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		if (code == 7)
			si.si_addr = (void __user *) regs->iaoq[0];
		else
			si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
			printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
				current->pid, current->comm);
			show_regs(regs);
#endif
			/* SIGBUS, for lack of a better one. */
			si.si_signo = SIGBUS;
			si.si_code = BUS_OBJERR;
			si.si_errno = 0;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGBUS, &si, current);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

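	/*
	 * The fault was not handled above.  For user mode, a fault whose
	 * space id does not match sr[7] (the user space register) means
	 * the access was outside the process' own address space, so send
	 * SIGSEGV directly instead of going through do_page_fault().
	 */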
	if (user_mode(regs)) {
		if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
#ifdef PRINT_USER_FAULTS
			if (fault_space == 0)
				printk(KERN_DEBUG "User Fault on Kernel Space ");
			else
				printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
				       code);
			printk("pid=%d command='%s'\n", current->pid, current->comm);
			show_regs(regs);
#endif
			si.si_signo = SIGSEGV;
			si.si_errno = 0;
			si.si_code = SEGV_MAPERR;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGSEGV, &si, current);
			return;
		}
	}
	else {

		/*
		 * The kernel should never fault on its own address space.
		 */

		if (fault_space == 0)
		{
			pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
			parisc_terminate("Kernel Fault", regs, code, fault_address);

		}
	}

	do_page_fault(regs, code, fault_address);
}


int __init check_ivt(void *iva)
{
	int i;
	u32 check = 0;
	u32 *ivap;
	u32 *hpmcp;
	u32 length;
	extern void os_hpmc(void);
	extern void os_hpmc_end(void);

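	/* The fault vector is expected to begin with this magic string;
	 * if it does not, we were handed the wrong address. */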
	if (strcmp((char *)iva, "cows can fly"))
		return -1;

	ivap = (u32 *)iva;

	for (i = 0; i < 8; i++)
		*ivap++ = 0;

	/* Compute Checksum for HPMC handler */

	length = (u32)((unsigned long)os_hpmc_end - (unsigned long)os_hpmc);
	ivap[7] = length;

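	/*
	 * Sum the HPMC handler words and the eight words at ivap (ivap[7]
	 * now holds the handler length), then store the negated sum in
	 * ivap[5] so that the whole block checksums to zero.
	 */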
	hpmcp = (u32 *)os_hpmc;

	for (i=0; i<length/4; i++)
		check += *hpmcp++;

	for (i=0; i<8; i++)
		check += ivap[i];

	ivap[5] = -check;

	return 0;
}

#ifndef CONFIG_64BIT
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;

void __init trap_init(void)
{
	void *iva;

	if (boot_cpu_data.cpu_type >= pcxu)
		iva = (void *) &fault_vector_20;
	else
#ifdef CONFIG_64BIT
		panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
		iva = (void *) &fault_vector_11;
#endif

	if (check_ivt(iva))
		panic("IVT invalid");
}