1 /*
2 * arch/alpha/kernel/traps.c
3 *
4 * (C) Copyright 1994 Linus Torvalds
5 */
6
7 /*
8 * This file initializes the trap entry points
9 */
10
11 #include <linux/jiffies.h>
12 #include <linux/mm.h>
13 #include <linux/sched/signal.h>
14 #include <linux/sched/debug.h>
15 #include <linux/tty.h>
16 #include <linux/delay.h>
17 #include <linux/extable.h>
18 #include <linux/kallsyms.h>
19 #include <linux/ratelimit.h>
20
21 #include <asm/gentrap.h>
22 #include <linux/uaccess.h>
23 #include <asm/unaligned.h>
24 #include <asm/sysinfo.h>
25 #include <asm/hwrpb.h>
26 #include <asm/mmu_context.h>
27 #include <asm/special_insns.h>
28
29 #include "proto.h"
30
31 /* Work-around for some SRMs which mishandle opDEC faults. */
32
33 static int opDEC_fix;
34
35 static void
36 opDEC_check(void)
37 {
38 __asm__ __volatile__ (
39 /* Load the address of... */
40 " br $16, 1f\n"
41 /* A stub instruction fault handler. Just add 4 to the
42 pc and continue. */
43 " ldq $16, 8($sp)\n"
44 " addq $16, 4, $16\n"
45 " stq $16, 8($sp)\n"
46 " call_pal %[rti]\n"
47 /* Install the instruction fault handler. */
48 "1: lda $17, 3\n"
49 " call_pal %[wrent]\n"
50 /* With that in place, the fault from the round-to-minf fp
51 insn will arrive either at the "lda 4" insn (bad) or one
52 past that (good). This places the correct fixup in %0. */
53 " lda %[fix], 0\n"
54 " cvttq/svm $f31,$f31\n"
55 " lda %[fix], 4"
56 : [fix] "=r" (opDEC_fix)
57 : [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
58 : "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");
59
60 if (opDEC_fix)
61 printk("opDEC fixup enabled.\n");
62 }
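/* The probe above leaves opDEC_fix at 0 on well-behaved SRMs and at 4 on
   the buggy ones; do_entIF()'s opDEC case adds it to regs->pc so that the
   reported PC always points past the faulting instruction, as the
   architecture requires. */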
63
64 void
65 dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
66 {
67 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n",
68 regs->pc, regs->r26, regs->ps, print_tainted());
69 printk("pc is at %pSR\n", (void *)regs->pc);
70 printk("ra is at %pSR\n", (void *)regs->r26);
71 printk("v0 = %016lx t0 = %016lx t1 = %016lx\n",
72 regs->r0, regs->r1, regs->r2);
73 printk("t2 = %016lx t3 = %016lx t4 = %016lx\n",
74 regs->r3, regs->r4, regs->r5);
75 printk("t5 = %016lx t6 = %016lx t7 = %016lx\n",
76 regs->r6, regs->r7, regs->r8);
77
78 if (r9_15) {
79 printk("s0 = %016lx s1 = %016lx s2 = %016lx\n",
80 r9_15[9], r9_15[10], r9_15[11]);
81 printk("s3 = %016lx s4 = %016lx s5 = %016lx\n",
82 r9_15[12], r9_15[13], r9_15[14]);
83 printk("s6 = %016lx\n", r9_15[15]);
84 }
85
86 printk("a0 = %016lx a1 = %016lx a2 = %016lx\n",
87 regs->r16, regs->r17, regs->r18);
88 printk("a3 = %016lx a4 = %016lx a5 = %016lx\n",
89 regs->r19, regs->r20, regs->r21);
90 printk("t8 = %016lx t9 = %016lx t10= %016lx\n",
91 regs->r22, regs->r23, regs->r24);
92 printk("t11= %016lx pv = %016lx at = %016lx\n",
93 regs->r25, regs->r27, regs->r28);
94 printk("gp = %016lx sp = %p\n", regs->gp, regs+1);
95 #if 0
96 __halt();
97 #endif
98 }
99
100 #if 0
101 static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
102 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
103 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
104 "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
105 #endif
106
107 static void
108 dik_show_code(unsigned int *pc)
109 {
110 long i;
111
112 printk("Code:");
113 for (i = -6; i < 2; i++) {
114 unsigned int insn;
115 if (__get_user(insn, (unsigned int __user *)pc + i))
116 break;
117 printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
118 }
119 printk("\n");
120 }
121
122 static void
123 dik_show_trace(unsigned long *sp)
124 {
125 long i = 0;
126 printk("Trace:\n");
127 while (0x1ff8 & (unsigned long) sp) {
128 extern char _stext[], _etext[];
129 unsigned long tmp = *sp;
130 sp++;
131 if (tmp < (unsigned long) &_stext)
132 continue;
133 if (tmp >= (unsigned long) &_etext)
134 continue;
135 printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
136 if (i > 40) {
137 printk(" ...");
138 break;
139 }
140 }
141 printk("\n");
142 }
143
144 static int kstack_depth_to_print = 24;
145
146 void show_stack(struct task_struct *task, unsigned long *sp)
147 {
148 unsigned long *stack;
149 int i;
150
151 /*
152 * debugging aid: "show_stack(NULL);" prints the
153 * back trace for this cpu.
154 */
155 if (sp == NULL)
156 sp = (unsigned long *)&sp;
157
158 stack = sp;
159 for (i = 0; i < kstack_depth_to_print; i++) {
160 if (((long) stack & (THREAD_SIZE-1)) == 0)
161 break;
162 if (i && ((i % 4) == 0))
163 printk("\n ");
164 printk("%016lx ", *stack++);
165 }
166 printk("\n");
167 dik_show_trace(sp);
168 }
169
170 void
171 die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
172 {
173 if (regs->ps & 8)
174 return;
175 #ifdef CONFIG_SMP
176 printk("CPU %d ", hard_smp_processor_id());
177 #endif
178 printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
179 dik_show_regs(regs, r9_15);
180 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
181 dik_show_trace((unsigned long *)(regs+1));
182 dik_show_code((unsigned int *)regs->pc);
183
184 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
185 printk("die_if_kernel recursion detected.\n");
186 local_irq_enable();
187 while (1);
188 }
189 do_exit(SIGSEGV);
190 }
191
192 #ifndef CONFIG_MATHEMU
193 static long dummy_emul(void) { return 0; }
194 long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
195 = (void *)dummy_emul;
196 EXPORT_SYMBOL_GPL(alpha_fp_emul_imprecise);
197 long (*alpha_fp_emul) (unsigned long pc)
198 = (void *)dummy_emul;
199 EXPORT_SYMBOL_GPL(alpha_fp_emul);
200 #else
201 long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
202 long alpha_fp_emul (unsigned long pc);
203 #endif
204
205 asmlinkage void
206 do_entArith(unsigned long summary, unsigned long write_mask,
207 struct pt_regs *regs)
208 {
209 long si_code = FPE_FLTINV;
210 siginfo_t info;
211
212 if (summary & 1) {
213 /* Software-completion summary bit is set, so try to
214 emulate the instruction. If the processor supports
215 precise exceptions, we don't have to search. */
216 if (!amask(AMASK_PRECISE_TRAP))
217 si_code = alpha_fp_emul(regs->pc - 4);
218 else
219 si_code = alpha_fp_emul_imprecise(regs, write_mask);
220 if (si_code == 0)
221 return;
222 }
223 die_if_kernel("Arithmetic fault", regs, 0, NULL);
224
225 info.si_signo = SIGFPE;
226 info.si_errno = 0;
227 info.si_code = si_code;
228 info.si_addr = (void __user *) regs->pc;
229 send_sig_info(SIGFPE, &info, current);
230 }
231
232 asmlinkage void
233 do_entIF(unsigned long type, struct pt_regs *regs)
234 {
235 siginfo_t info;
236 int signo, code;
237
238 if ((regs->ps & ~IPL_MAX) == 0) {
239 if (type == 1) {
240 const unsigned int *data
241 = (const unsigned int *) regs->pc;
242 printk("Kernel bug at %s:%d\n",
243 (const char *)(data[1] | (long)data[2] << 32),
244 data[0]);
245 }
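/* The two words decoded above (a 32-bit line number followed by a
   64-bit file-name pointer) are presumably the ones the Alpha BUG()
   macro plants immediately after its trapping instruction. */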
246 #ifdef CONFIG_ALPHA_WTINT
247 if (type == 4) {
248 /* If CALL_PAL WTINT is totally unsupported by the
249 PALcode, e.g. MILO, "emulate" it by overwriting
250 the insn. */
251 unsigned int *pinsn
252 = (unsigned int *) regs->pc - 1;
253 if (*pinsn == PAL_wtint) {
254 *pinsn = 0x47e01400; /* mov 0,$0 */
255 imb();
256 regs->r0 = 0;
257 return;
258 }
259 }
260 #endif /* ALPHA_WTINT */
261 die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
262 regs, type, NULL);
263 }
264
265 switch (type) {
266 case 0: /* breakpoint */
267 info.si_signo = SIGTRAP;
268 info.si_errno = 0;
269 info.si_code = TRAP_BRKPT;
270 info.si_trapno = 0;
271 info.si_addr = (void __user *) regs->pc;
272
273 if (ptrace_cancel_bpt(current)) {
274 regs->pc -= 4; /* make pc point to former bpt */
275 }
276
277 send_sig_info(SIGTRAP, &info, current);
278 return;
279
280 case 1: /* bugcheck */
281 info.si_signo = SIGTRAP;
282 info.si_errno = 0;
283 info.si_code = TRAP_FIXME;
284 info.si_addr = (void __user *) regs->pc;
285 info.si_trapno = 0;
286 send_sig_info(SIGTRAP, &info, current);
287 return;
288
289 case 2: /* gentrap */
290 info.si_addr = (void __user *) regs->pc;
291 info.si_trapno = regs->r16;
292 switch ((long) regs->r16) {
293 case GEN_INTOVF:
294 signo = SIGFPE;
295 code = FPE_INTOVF;
296 break;
297 case GEN_INTDIV:
298 signo = SIGFPE;
299 code = FPE_INTDIV;
300 break;
301 case GEN_FLTOVF:
302 signo = SIGFPE;
303 code = FPE_FLTOVF;
304 break;
305 case GEN_FLTDIV:
306 signo = SIGFPE;
307 code = FPE_FLTDIV;
308 break;
309 case GEN_FLTUND:
310 signo = SIGFPE;
311 code = FPE_FLTUND;
312 break;
313 case GEN_FLTINV:
314 signo = SIGFPE;
315 code = FPE_FLTINV;
316 break;
317 case GEN_FLTINE:
318 signo = SIGFPE;
319 code = FPE_FLTRES;
320 break;
321 case GEN_ROPRAND:
322 signo = SIGFPE;
323 code = FPE_FIXME;
324 break;
325
326 case GEN_DECOVF:
327 case GEN_DECDIV:
328 case GEN_DECINV:
329 case GEN_ASSERTERR:
330 case GEN_NULPTRERR:
331 case GEN_STKOVF:
332 case GEN_STRLENERR:
333 case GEN_SUBSTRERR:
334 case GEN_RANGERR:
335 case GEN_SUBRNG:
336 case GEN_SUBRNG1:
337 case GEN_SUBRNG2:
338 case GEN_SUBRNG3:
339 case GEN_SUBRNG4:
340 case GEN_SUBRNG5:
341 case GEN_SUBRNG6:
342 case GEN_SUBRNG7:
343 default:
344 signo = SIGTRAP;
345 code = TRAP_FIXME;
346 break;
347 }
348
349 info.si_signo = signo;
350 info.si_errno = 0;
351 info.si_code = code;
352 info.si_addr = (void __user *) regs->pc;
353 send_sig_info(signo, &info, current);
354 return;
355
356 case 4: /* opDEC */
357 if (implver() == IMPLVER_EV4) {
358 long si_code;
359
360 /* Some versions of SRM do not handle
361 the opDEC fault properly - they return the PC of the
362 opDEC fault, not the instruction after as the
363 Alpha architecture requires. Here we fix it up.
364 We do this by intentionally causing an opDEC
365 fault during the boot sequence and testing if
366 we get the correct PC. If not, we set a flag
367 to correct it every time through. */
368 regs->pc += opDEC_fix;
369
370 /* EV4 does not implement anything except normal
371 rounding. Everything else will come here as
372 an illegal instruction. Emulate them. */
373 si_code = alpha_fp_emul(regs->pc - 4);
374 if (si_code == 0)
375 return;
376 if (si_code > 0) {
377 info.si_signo = SIGFPE;
378 info.si_errno = 0;
379 info.si_code = si_code;
380 info.si_addr = (void __user *) regs->pc;
381 send_sig_info(SIGFPE, &info, current);
382 return;
383 }
384 }
385 break;
386
387 case 3: /* FEN fault */
388 /* Irritating users can call PAL_clrfen to disable the
389 FPU for the process. The kernel will then trap in
390 do_switch_stack and undo_switch_stack when we try
391 to save and restore the FP registers.
392
393 Given that GCC by default generates code that uses the
394 FP registers, PAL_clrfen is not useful except for DoS
395 attacks. So turn the bleeding FPU back on and be done
396 with it. */
397 current_thread_info()->pcb.flags |= 1;
398 __reload_thread(&current_thread_info()->pcb);
399 return;
400
401 case 5: /* illoc */
402 default: /* unexpected instruction-fault type */
403 ;
404 }
405
406 info.si_signo = SIGILL;
407 info.si_errno = 0;
408 info.si_code = ILL_ILLOPC;
409 info.si_addr = (void __user *) regs->pc;
410 send_sig_info(SIGILL, &info, current);
411 }
412
413 /* There is an ifdef in the PALcode in MILO that enables a
414 "kernel debugging entry point" as an unprivileged call_pal.
415
416 We don't want to have anything to do with it, but unfortunately
417 several versions of MILO included in distributions have it enabled,
418 and if we don't put something on the entry point we'll oops. */
419
420 asmlinkage void
421 do_entDbg(struct pt_regs *regs)
422 {
423 siginfo_t info;
424
425 die_if_kernel("Instruction fault", regs, 0, NULL);
426
427 info.si_signo = SIGILL;
428 info.si_errno = 0;
429 info.si_code = ILL_ILLOPC;
430 info.si_addr = (void __user *) regs->pc;
431 force_sig_info(SIGILL, &info, current);
432 }
433
434
435 /*
436 * entUna uses a different register layout in order to stay reasonably simple. It
437 * needs access to all the integer registers (the kernel doesn't use
438 * fp-regs), and it needs to have them in order for simpler access.
439 *
440 * Due to the non-standard register layout (and because we don't want
441 * to handle floating-point regs), user-mode unaligned accesses are
442 * handled separately by do_entUnaUser below.
443 *
444 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
445 * on a gp-register unaligned load/store, something is _very_ wrong
446 * in the kernel anyway..
447 */
448 struct allregs {
449 unsigned long regs[32];
450 unsigned long ps, pc, gp, a0, a1, a2;
451 };
452
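/* Statistics on fixed-up unaligned accesses: entry 0 is bumped by the
   kernel-mode handler (do_entUna), entry 1 by the user-mode handler
   (do_entUnaUser) below. */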
453 struct unaligned_stat {
454 unsigned long count, va, pc;
455 } unaligned[2];
456
457
458 /* Macro for exception fixup code to access integer registers. */
459 #define una_reg(r) (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
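/* Worked example: struct allregs puts a0-a2 after regs[32], ps, pc and gp,
   so for r16..r18 the macro indexes slot (r)+19, e.g. una_reg(16) is
   _regs[35], which is the a0 member; every other register number indexes
   regs[] directly. */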
460
461
462 asmlinkage void
463 do_entUna(void * va, unsigned long opcode, unsigned long reg,
464 struct allregs *regs)
465 {
466 long error, tmp1, tmp2, tmp3, tmp4;
467 unsigned long pc = regs->pc - 4;
468 unsigned long *_regs = regs->regs;
469 const struct exception_table_entry *fixup;
470
471 unaligned[0].count++;
472 unaligned[0].va = (unsigned long) va;
473 unaligned[0].pc = pc;
474
475 /* We don't want to use the generic get/put unaligned macros as
476 we want to trap exceptions. Only if we actually get an
477 exception will we decide whether we should have caught it. */
478
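/* Each load case below fetches the two aligned quadwords covering the
   datum with ldq_u, extracts the low and high fragments with the ext*l /
   ext*h pair, and ORs them together; the EXC() entries let a fault on
   either ldq_u be caught and reported through 'error' rather than
   oopsing. */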
479 switch (opcode) {
480 case 0x0c: /* ldwu */
481 __asm__ __volatile__(
482 "1: ldq_u %1,0(%3)\n"
483 "2: ldq_u %2,1(%3)\n"
484 " extwl %1,%3,%1\n"
485 " extwh %2,%3,%2\n"
486 "3:\n"
487 EXC(1b,3b,%1,%0)
488 EXC(2b,3b,%2,%0)
489 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
490 : "r"(va), "0"(0));
491 if (error)
492 goto got_exception;
493 una_reg(reg) = tmp1|tmp2;
494 return;
495
496 case 0x28: /* ldl */
497 __asm__ __volatile__(
498 "1: ldq_u %1,0(%3)\n"
499 "2: ldq_u %2,3(%3)\n"
500 " extll %1,%3,%1\n"
501 " extlh %2,%3,%2\n"
502 "3:\n"
503 EXC(1b,3b,%1,%0)
504 EXC(2b,3b,%2,%0)
505 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
506 : "r"(va), "0"(0));
507 if (error)
508 goto got_exception;
509 una_reg(reg) = (int)(tmp1|tmp2);
510 return;
511
512 case 0x29: /* ldq */
513 __asm__ __volatile__(
514 "1: ldq_u %1,0(%3)\n"
515 "2: ldq_u %2,7(%3)\n"
516 " extql %1,%3,%1\n"
517 " extqh %2,%3,%2\n"
518 "3:\n"
519 EXC(1b,3b,%1,%0)
520 EXC(2b,3b,%2,%0)
521 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
522 : "r"(va), "0"(0));
523 if (error)
524 goto got_exception;
525 una_reg(reg) = tmp1|tmp2;
526 return;
527
528 /* Note that the store sequences do not indicate that they change
529 memory because it _should_ be affecting nothing in this context.
530 (Otherwise we have other, much larger, problems.) */
531 case 0x0d: /* stw */
532 __asm__ __volatile__(
533 "1: ldq_u %2,1(%5)\n"
534 "2: ldq_u %1,0(%5)\n"
535 " inswh %6,%5,%4\n"
536 " inswl %6,%5,%3\n"
537 " mskwh %2,%5,%2\n"
538 " mskwl %1,%5,%1\n"
539 " or %2,%4,%2\n"
540 " or %1,%3,%1\n"
541 "3: stq_u %2,1(%5)\n"
542 "4: stq_u %1,0(%5)\n"
543 "5:\n"
544 EXC(1b,5b,%2,%0)
545 EXC(2b,5b,%1,%0)
546 EXC(3b,5b,$31,%0)
547 EXC(4b,5b,$31,%0)
548 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
549 "=&r"(tmp3), "=&r"(tmp4)
550 : "r"(va), "r"(una_reg(reg)), "0"(0));
551 if (error)
552 goto got_exception;
553 return;
554
555 case 0x2c: /* stl */
556 __asm__ __volatile__(
557 "1: ldq_u %2,3(%5)\n"
558 "2: ldq_u %1,0(%5)\n"
559 " inslh %6,%5,%4\n"
560 " insll %6,%5,%3\n"
561 " msklh %2,%5,%2\n"
562 " mskll %1,%5,%1\n"
563 " or %2,%4,%2\n"
564 " or %1,%3,%1\n"
565 "3: stq_u %2,3(%5)\n"
566 "4: stq_u %1,0(%5)\n"
567 "5:\n"
568 EXC(1b,5b,%2,%0)
569 EXC(2b,5b,%1,%0)
570 EXC(3b,5b,$31,%0)
571 EXC(4b,5b,$31,%0)
572 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
573 "=&r"(tmp3), "=&r"(tmp4)
574 : "r"(va), "r"(una_reg(reg)), "0"(0));
575 if (error)
576 goto got_exception;
577 return;
578
579 case 0x2d: /* stq */
580 __asm__ __volatile__(
581 "1: ldq_u %2,7(%5)\n"
582 "2: ldq_u %1,0(%5)\n"
583 " insqh %6,%5,%4\n"
584 " insql %6,%5,%3\n"
585 " mskqh %2,%5,%2\n"
586 " mskql %1,%5,%1\n"
587 " or %2,%4,%2\n"
588 " or %1,%3,%1\n"
589 "3: stq_u %2,7(%5)\n"
590 "4: stq_u %1,0(%5)\n"
591 "5:\n"
592 EXC(1b,5b,%2,%0)
593 EXC(2b,5b,%1,%0)
594 EXC(3b,5b,$31,%0)
595 EXC(4b,5b,$31,%0)
596 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
597 "=&r"(tmp3), "=&r"(tmp4)
598 : "r"(va), "r"(una_reg(reg)), "0"(0));
599 if (error)
600 goto got_exception;
601 return;
602 }
603
604 printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
605 pc, va, opcode, reg);
606 do_exit(SIGSEGV);
607
608 got_exception:
609 /* Ok, we caught the exception, but we don't want it. Is there
610 someone to pass it along to? */
611 if ((fixup = search_exception_tables(pc)) != 0) {
612 unsigned long newpc;
613 newpc = fixup_exception(una_reg, fixup, pc);
614
615 printk("Forwarding unaligned exception at %lx (%lx)\n",
616 pc, newpc);
617
618 regs->pc = newpc;
619 return;
620 }
621
622 /*
623 * Yikes! No one to forward the exception to.
624 * Since the registers are in a weird format, dump them ourselves.
625 */
626
627 printk("%s(%d): unhandled unaligned exception\n",
628 current->comm, task_pid_nr(current));
629
630 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n",
631 pc, una_reg(26), regs->ps);
632 printk("r0 = %016lx r1 = %016lx r2 = %016lx\n",
633 una_reg(0), una_reg(1), una_reg(2));
634 printk("r3 = %016lx r4 = %016lx r5 = %016lx\n",
635 una_reg(3), una_reg(4), una_reg(5));
636 printk("r6 = %016lx r7 = %016lx r8 = %016lx\n",
637 una_reg(6), una_reg(7), una_reg(8));
638 printk("r9 = %016lx r10= %016lx r11= %016lx\n",
639 una_reg(9), una_reg(10), una_reg(11));
640 printk("r12= %016lx r13= %016lx r14= %016lx\n",
641 una_reg(12), una_reg(13), una_reg(14));
642 printk("r15= %016lx\n", una_reg(15));
643 printk("r16= %016lx r17= %016lx r18= %016lx\n",
644 una_reg(16), una_reg(17), una_reg(18));
645 printk("r19= %016lx r20= %016lx r21= %016lx\n",
646 una_reg(19), una_reg(20), una_reg(21));
647 printk("r22= %016lx r23= %016lx r24= %016lx\n",
648 una_reg(22), una_reg(23), una_reg(24));
649 printk("r25= %016lx r27= %016lx r28= %016lx\n",
650 una_reg(25), una_reg(27), una_reg(28));
651 printk("gp = %016lx sp = %p\n", regs->gp, regs+1);
652
653 dik_show_code((unsigned int *)pc);
654 dik_show_trace((unsigned long *)(regs+1));
655
656 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
657 printk("die_if_kernel recursion detected.\n");
658 local_irq_enable();
659 while (1);
660 }
661 do_exit(SIGSEGV);
662 }
663
664 /*
665 * Convert an s-floating point value in memory format to the
666 * corresponding value in register format. The exponent
667 * needs to be remapped to preserve non-finite values
668 * (infinities, not-a-numbers, denormals).
669 */
670 static inline unsigned long
671 s_mem_to_reg (unsigned long s_mem)
672 {
673 unsigned long frac = (s_mem >> 0) & 0x7fffff;
674 unsigned long sign = (s_mem >> 31) & 0x1;
675 unsigned long exp_msb = (s_mem >> 30) & 0x1;
676 unsigned long exp_low = (s_mem >> 23) & 0x7f;
677 unsigned long exp;
678
679 exp = (exp_msb << 10) | exp_low; /* common case */
680 if (exp_msb) {
681 if (exp_low == 0x7f) {
682 exp = 0x7ff;
683 }
684 } else {
685 if (exp_low == 0x00) {
686 exp = 0x000;
687 } else {
688 exp |= (0x7 << 7);
689 }
690 }
691 return (sign << 63) | (exp << 52) | (frac << 29);
692 }
693
694 /*
695 * Convert an s-floating point value in register format to the
696 * corresponding value in memory format.
697 */
698 static inline unsigned long
699 s_reg_to_mem (unsigned long s_reg)
700 {
701 return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
702 }
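/* Worked example of the mapping above: the memory-format single 1.0f
   (0x3f800000) has sign 0, exp_msb 0, exp_low 0x7f and a zero fraction,
   so s_mem_to_reg() produces the T-float image 0x3ff0000000000000, and
   s_reg_to_mem() maps that straight back.  The block below is only a
   hypothetical self-check sketch along those lines. */
#if 0
static void
s_float_format_selfcheck(void)
{
	unsigned long reg = s_mem_to_reg(0x3f800000UL);	/* 1.0f, memory format */

	BUG_ON(reg != 0x3ff0000000000000UL);		/* 1.0 as T-float bits */
	BUG_ON(s_reg_to_mem(reg) != 0x3f800000UL);	/* round-trips cleanly */
}
#endif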
703
704 /*
705 * Handle user-level unaligned fault. Handling user-level unaligned
706 * faults is *extremely* slow and produces nasty messages. A user
707 * program *should* fix unaligned faults ASAP.
708 *
709 * Notice that we have (almost) the regular kernel stack layout here,
710 * so finding the appropriate registers is a little more difficult
711 * than in the kernel case.
712 *
713 * Finally, we handle regular integer load/stores only. In
714 * particular, load-linked/store-conditionally and floating point
715 * load/stores are not supported. The former make no sense with
716 * unaligned faults (they are guaranteed to fail) and I don't think
717 * the latter will occur in any decent program.
718 *
719 * Sigh. We *do* have to handle some FP operations, because GCC will
720 * use them as temporary storage for integer memory-to-memory copies.
721 * However, we need to deal with stt/ldt and sts/lds only.
722 */
723
724 #define OP_INT_MASK ( 1L << 0x28 | 1L << 0x2c /* ldl stl */ \
725 | 1L << 0x29 | 1L << 0x2d /* ldq stq */ \
726 | 1L << 0x0c | 1L << 0x0d /* ldwu stw */ \
727 | 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */
728
729 #define OP_WRITE_MASK ( 1L << 0x26 | 1L << 0x27 /* sts stt */ \
730 | 1L << 0x2c | 1L << 0x2d /* stl stq */ \
731 | 1L << 0x0d | 1L << 0x0e ) /* stw stb */
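/* The classification test used below is a single bit probe, e.g.
   (1L << 0x28) & OP_INT_MASK is non-zero (ldl is an integer load) while
   (1L << 0x23) & OP_INT_MASK is zero, so ldt is handled by its own case
   in the switch instead of the integer register path. */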
732
733 #define R(x) ((size_t) &((struct pt_regs *)0)->x)
734
735 static int unauser_reg_offsets[32] = {
736 R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
737 /* r9 ... r15 are stored in front of regs. */
738 -56, -48, -40, -32, -24, -16, -8,
739 R(r16), R(r17), R(r18),
740 R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
741 R(r27), R(r28), R(gp),
742 0, 0
743 };
744
745 #undef R
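/* Example of how the table is used: for an integer access to register 10
   (s1) the offset is -48, so reg_addr ends up 48 bytes in front of the
   pt_regs frame, where r9..r15 were saved; for register 16 it is simply
   offsetof(struct pt_regs, r16).  Registers 30 and 31 never reach the
   table lookup because usp and the zero register are special-cased via
   fake_reg. */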
746
747 asmlinkage void
748 do_entUnaUser(void __user * va, unsigned long opcode,
749 unsigned long reg, struct pt_regs *regs)
750 {
751 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
752
753 unsigned long tmp1, tmp2, tmp3, tmp4;
754 unsigned long fake_reg, *reg_addr = &fake_reg;
755 siginfo_t info;
756 long error;
757
758 /* Check the UAC bits to decide what the user wants us to do
759 with the unaligned access. */
760
761 if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
762 if (__ratelimit(&ratelimit)) {
763 printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
764 current->comm, task_pid_nr(current),
765 regs->pc - 4, va, opcode, reg);
766 }
767 }
768 if ((current_thread_info()->status & TS_UAC_SIGBUS))
769 goto give_sigbus;
770 /* Not sure why you'd want to use this, but... */
771 if ((current_thread_info()->status & TS_UAC_NOFIX))
772 return;
773
774 /* Don't bother reading ds in the access check since we already
775 know that this came from the user. Also rely on the fact that
776 the page at TASK_SIZE is unmapped and so can't be touched anyway. */
777 if ((unsigned long)va >= TASK_SIZE)
778 goto give_sigsegv;
779
780 ++unaligned[1].count;
781 unaligned[1].va = (unsigned long)va;
782 unaligned[1].pc = regs->pc - 4;
783
784 if ((1L << opcode) & OP_INT_MASK) {
785 /* it's an integer load/store */
786 if (reg < 30) {
787 reg_addr = (unsigned long *)
788 ((char *)regs + unauser_reg_offsets[reg]);
789 } else if (reg == 30) {
790 /* usp in PAL regs */
791 fake_reg = rdusp();
792 } else {
793 /* zero "register" */
794 fake_reg = 0;
795 }
796 }
797
798 /* We don't want to use the generic get/put unaligned macros as
799 we want to trap exceptions. Only if we actually get an
800 exception will we decide whether we should have caught it. */
801
802 switch (opcode) {
803 case 0x0c: /* ldwu */
804 __asm__ __volatile__(
805 "1: ldq_u %1,0(%3)\n"
806 "2: ldq_u %2,1(%3)\n"
807 " extwl %1,%3,%1\n"
808 " extwh %2,%3,%2\n"
809 "3:\n"
810 EXC(1b,3b,%1,%0)
811 EXC(2b,3b,%2,%0)
812 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
813 : "r"(va), "0"(0));
814 if (error)
815 goto give_sigsegv;
816 *reg_addr = tmp1|tmp2;
817 break;
818
819 case 0x22: /* lds */
820 __asm__ __volatile__(
821 "1: ldq_u %1,0(%3)\n"
822 "2: ldq_u %2,3(%3)\n"
823 " extll %1,%3,%1\n"
824 " extlh %2,%3,%2\n"
825 "3:\n"
826 EXC(1b,3b,%1,%0)
827 EXC(2b,3b,%2,%0)
828 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
829 : "r"(va), "0"(0));
830 if (error)
831 goto give_sigsegv;
832 alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
833 return;
834
835 case 0x23: /* ldt */
836 __asm__ __volatile__(
837 "1: ldq_u %1,0(%3)\n"
838 "2: ldq_u %2,7(%3)\n"
839 " extql %1,%3,%1\n"
840 " extqh %2,%3,%2\n"
841 "3:\n"
842 EXC(1b,3b,%1,%0)
843 EXC(2b,3b,%2,%0)
844 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
845 : "r"(va), "0"(0));
846 if (error)
847 goto give_sigsegv;
848 alpha_write_fp_reg(reg, tmp1|tmp2);
849 return;
850
851 case 0x28: /* ldl */
852 __asm__ __volatile__(
853 "1: ldq_u %1,0(%3)\n"
854 "2: ldq_u %2,3(%3)\n"
855 " extll %1,%3,%1\n"
856 " extlh %2,%3,%2\n"
857 "3:\n"
858 EXC(1b,3b,%1,%0)
859 EXC(2b,3b,%2,%0)
860 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
861 : "r"(va), "0"(0));
862 if (error)
863 goto give_sigsegv;
864 *reg_addr = (int)(tmp1|tmp2);
865 break;
866
867 case 0x29: /* ldq */
868 __asm__ __volatile__(
869 "1: ldq_u %1,0(%3)\n"
870 "2: ldq_u %2,7(%3)\n"
871 " extql %1,%3,%1\n"
872 " extqh %2,%3,%2\n"
873 "3:\n"
874 EXC(1b,3b,%1,%0)
875 EXC(2b,3b,%2,%0)
876 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
877 : "r"(va), "0"(0));
878 if (error)
879 goto give_sigsegv;
880 *reg_addr = tmp1|tmp2;
881 break;
882
883 /* Note that the store sequences do not indicate that they change
884 memory because it _should_ be affecting nothing in this context.
885 (Otherwise we have other, much larger, problems.) */
886 case 0x0d: /* stw */
887 __asm__ __volatile__(
888 "1: ldq_u %2,1(%5)\n"
889 "2: ldq_u %1,0(%5)\n"
890 " inswh %6,%5,%4\n"
891 " inswl %6,%5,%3\n"
892 " mskwh %2,%5,%2\n"
893 " mskwl %1,%5,%1\n"
894 " or %2,%4,%2\n"
895 " or %1,%3,%1\n"
896 "3: stq_u %2,1(%5)\n"
897 "4: stq_u %1,0(%5)\n"
898 "5:\n"
899 EXC(1b,5b,%2,%0)
900 EXC(2b,5b,%1,%0)
901 EXC(3b,5b,$31,%0)
902 EXC(4b,5b,$31,%0)
903 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
904 "=&r"(tmp3), "=&r"(tmp4)
905 : "r"(va), "r"(*reg_addr), "0"(0));
906 if (error)
907 goto give_sigsegv;
908 return;
909
910 case 0x26: /* sts */
911 fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
912 /* FALLTHRU */
913
914 case 0x2c: /* stl */
915 __asm__ __volatile__(
916 "1: ldq_u %2,3(%5)\n"
917 "2: ldq_u %1,0(%5)\n"
918 " inslh %6,%5,%4\n"
919 " insll %6,%5,%3\n"
920 " msklh %2,%5,%2\n"
921 " mskll %1,%5,%1\n"
922 " or %2,%4,%2\n"
923 " or %1,%3,%1\n"
924 "3: stq_u %2,3(%5)\n"
925 "4: stq_u %1,0(%5)\n"
926 "5:\n"
927 EXC(1b,5b,%2,%0)
928 EXC(2b,5b,%1,%0)
929 EXC(3b,5b,$31,%0)
930 EXC(4b,5b,$31,%0)
931 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
932 "=&r"(tmp3), "=&r"(tmp4)
933 : "r"(va), "r"(*reg_addr), "0"(0));
934 if (error)
935 goto give_sigsegv;
936 return;
937
938 case 0x27: /* stt */
939 fake_reg = alpha_read_fp_reg(reg);
940 /* FALLTHRU */
941
942 case 0x2d: /* stq */
943 __asm__ __volatile__(
944 "1: ldq_u %2,7(%5)\n"
945 "2: ldq_u %1,0(%5)\n"
946 " insqh %6,%5,%4\n"
947 " insql %6,%5,%3\n"
948 " mskqh %2,%5,%2\n"
949 " mskql %1,%5,%1\n"
950 " or %2,%4,%2\n"
951 " or %1,%3,%1\n"
952 "3: stq_u %2,7(%5)\n"
953 "4: stq_u %1,0(%5)\n"
954 "5:\n"
955 EXC(1b,5b,%2,%0)
956 EXC(2b,5b,%1,%0)
957 EXC(3b,5b,$31,%0)
958 EXC(4b,5b,$31,%0)
959 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
960 "=&r"(tmp3), "=&r"(tmp4)
961 : "r"(va), "r"(*reg_addr), "0"(0));
962 if (error)
963 goto give_sigsegv;
964 return;
965
966 default:
967 /* What instruction were you trying to use, exactly? */
968 goto give_sigbus;
969 }
970
971 /* Only integer loads should get here; everyone else returns early. */
972 if (reg == 30)
973 wrusp(fake_reg);
974 return;
975
976 give_sigsegv:
977 regs->pc -= 4; /* make pc point to faulting insn */
978 info.si_signo = SIGSEGV;
979 info.si_errno = 0;
980
981 /* We need to replicate some of the logic in mm/fault.c,
982 since we don't have access to the fault code in the
983 exception handling return path. */
984 if ((unsigned long)va >= TASK_SIZE)
985 info.si_code = SEGV_ACCERR;
986 else {
987 struct mm_struct *mm = current->mm;
988 down_read(&mm->mmap_sem);
989 if (find_vma(mm, (unsigned long)va))
990 info.si_code = SEGV_ACCERR;
991 else
992 info.si_code = SEGV_MAPERR;
993 up_read(&mm->mmap_sem);
994 }
995 info.si_addr = va;
996 send_sig_info(SIGSEGV, &info, current);
997 return;
998
999 give_sigbus:
1000 regs->pc -= 4;
1001 info.si_signo = SIGBUS;
1002 info.si_errno = 0;
1003 info.si_code = BUS_ADRALN;
1004 info.si_addr = va;
1005 send_sig_info(SIGBUS, &info, current);
1006 return;
1007 }
1008
1009 void
1010 trap_init(void)
1011 {
1012 /* Tell PAL-code what global pointer we want in the kernel. */
1013 register unsigned long gptr __asm__("$29");
1014 wrkgp(gptr);
1015
1016 /* Hack for Multia (UDB) and JENSEN: some of their SRMs have
1017 a bug in the handling of the opDEC fault. Fix it up if so. */
1018 if (implver() == IMPLVER_EV4)
1019 opDEC_check();
1020
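/* Hook up the trap entry points: slot 1 is the arithmetic trap, 2 memory
   management, 3 instruction fault, 4 unaligned access, 5 system call and
   6 the debug entry. */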
1021 wrent(entArith, 1);
1022 wrent(entMM, 2);
1023 wrent(entIF, 3);
1024 wrent(entUna, 4);
1025 wrent(entSys, 5);
1026 wrent(entDbg, 6);
1027 }