/*
 * arch/alpha/kernel/traps.c
 *
 * (C) Copyright 1994 Linus Torvalds
 *
 * This file initializes the trap entry points
 */
11 #include <linux/jiffies.h>
13 #include <linux/sched/signal.h>
14 #include <linux/sched/debug.h>
15 #include <linux/tty.h>
16 #include <linux/delay.h>
17 #include <linux/extable.h>
18 #include <linux/kallsyms.h>
19 #include <linux/ratelimit.h>
21 #include <asm/gentrap.h>
22 #include <linux/uaccess.h>
23 #include <asm/unaligned.h>
24 #include <asm/sysinfo.h>
25 #include <asm/hwrpb.h>
26 #include <asm/mmu_context.h>
27 #include <asm/special_insns.h>
31 /* Work-around for some SRMs which mishandle opDEC faults. */
38 __asm__
__volatile__ (
39 /* Load the address of... */
41 /* A stub instruction fault handler. Just add 4 to the
47 /* Install the instruction fault handler. */
49 " call_pal %[wrent]\n"
50 /* With that in place, the fault from the round-to-minf fp
51 insn will arrive either at the "lda 4" insn (bad) or one
52 past that (good). This places the correct fixup in %0. */
54 " cvttq/svm $f31,$f31\n"
56 : [fix
] "=r" (opDEC_fix
)
57 : [rti
] "n" (PAL_rti
), [wrent
] "n" (PAL_wrent
)
58 : "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");
61 printk("opDEC fixup enabled.\n");
65 dik_show_regs(struct pt_regs
*regs
, unsigned long *r9_15
)
67 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n",
68 regs
->pc
, regs
->r26
, regs
->ps
, print_tainted());
69 printk("pc is at %pSR\n", (void *)regs
->pc
);
70 printk("ra is at %pSR\n", (void *)regs
->r26
);
71 printk("v0 = %016lx t0 = %016lx t1 = %016lx\n",
72 regs
->r0
, regs
->r1
, regs
->r2
);
73 printk("t2 = %016lx t3 = %016lx t4 = %016lx\n",
74 regs
->r3
, regs
->r4
, regs
->r5
);
75 printk("t5 = %016lx t6 = %016lx t7 = %016lx\n",
76 regs
->r6
, regs
->r7
, regs
->r8
);
79 printk("s0 = %016lx s1 = %016lx s2 = %016lx\n",
80 r9_15
[9], r9_15
[10], r9_15
[11]);
81 printk("s3 = %016lx s4 = %016lx s5 = %016lx\n",
82 r9_15
[12], r9_15
[13], r9_15
[14]);
83 printk("s6 = %016lx\n", r9_15
[15]);
86 printk("a0 = %016lx a1 = %016lx a2 = %016lx\n",
87 regs
->r16
, regs
->r17
, regs
->r18
);
88 printk("a3 = %016lx a4 = %016lx a5 = %016lx\n",
89 regs
->r19
, regs
->r20
, regs
->r21
);
90 printk("t8 = %016lx t9 = %016lx t10= %016lx\n",
91 regs
->r22
, regs
->r23
, regs
->r24
);
92 printk("t11= %016lx pv = %016lx at = %016lx\n",
93 regs
->r25
, regs
->r27
, regs
->r28
);
94 printk("gp = %016lx sp = %p\n", regs
->gp
, regs
+1);
/*
 * Software (calling-convention) names of the 32 Alpha integer
 * registers, indexed by hardware register number r0..r31.
 */
static char * ireg_name[] = {
	"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
	"t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
	"a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
	"t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"
};
108 dik_show_code(unsigned int *pc
)
113 for (i
= -6; i
< 2; i
++) {
115 if (__get_user(insn
, (unsigned int __user
*)pc
+ i
))
117 printk("%c%08x%c", i
? ' ' : '<', insn
, i
? ' ' : '>');
/*
 * Heuristic backtrace: walk up the stack from @sp, printing every word
 * that falls inside the kernel text (_stext.._etext).  The walk stops
 * at the region boundary (mask 0x1ff8 — NOTE(review): assumes an 8KB
 * stack region; confirm against THREAD_SIZE) or after ~40 hits.
 * The limiter increments per printed entry so the cap is effective.
 */
static void
dik_show_trace(unsigned long *sp)
{
	long i = 0;

	printk("Trace:\n");
	while (0x1ff8 & (unsigned long) sp) {
		extern char _stext[], _etext[];
		unsigned long tmp = *sp;
		sp++;
		if (tmp < (unsigned long) &_stext)
			continue;
		if (tmp >= (unsigned long) &_etext)
			continue;
		printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
		if (i > 40) {
			printk(" ...");
			break;
		}
		i++;
	}
	printk("\n");
}
/* How many stack words show_stack() dumps before giving up. */
static int kstack_depth_to_print = 24;
146 void show_stack(struct task_struct
*task
, unsigned long *sp
)
148 unsigned long *stack
;
152 * debugging aid: "show_stack(NULL);" prints the
153 * back trace for this cpu.
156 sp
=(unsigned long*)&sp
;
159 for(i
=0; i
< kstack_depth_to_print
; i
++) {
160 if (((long) stack
& (THREAD_SIZE
-1)) == 0)
162 if (i
&& ((i
% 4) == 0))
164 printk("%016lx ", *stack
++);
171 die_if_kernel(char * str
, struct pt_regs
*regs
, long err
, unsigned long *r9_15
)
176 printk("CPU %d ", hard_smp_processor_id());
178 printk("%s(%d): %s %ld\n", current
->comm
, task_pid_nr(current
), str
, err
);
179 dik_show_regs(regs
, r9_15
);
180 add_taint(TAINT_DIE
, LOCKDEP_NOW_UNRELIABLE
);
181 dik_show_trace((unsigned long *)(regs
+1));
182 dik_show_code((unsigned int *)regs
->pc
);
184 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL
)) {
185 printk("die_if_kernel recursion detected.\n");
#ifndef CONFIG_MATHEMU
/* Without the math emulator configured, point the emulation hooks at a
   stub that returns 0 ("not handled"), so callers fall through to the
   signal path.  The math-emu module can re-point these at load time. */
static long dummy_emul(void) { return 0; }
long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
  = (void *)dummy_emul;
EXPORT_SYMBOL_GPL(alpha_fp_emul_imprecise);
long (*alpha_fp_emul) (unsigned long pc)
  = (void *)dummy_emul;
EXPORT_SYMBOL_GPL(alpha_fp_emul);
#else
/* Built-in math emulator: direct declarations of the real routines. */
long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
long alpha_fp_emul (unsigned long pc);
#endif
206 do_entArith(unsigned long summary
, unsigned long write_mask
,
207 struct pt_regs
*regs
)
209 long si_code
= FPE_FLTINV
;
213 /* Software-completion summary bit is set, so try to
214 emulate the instruction. If the processor supports
215 precise exceptions, we don't have to search. */
216 if (!amask(AMASK_PRECISE_TRAP
))
217 si_code
= alpha_fp_emul(regs
->pc
- 4);
219 si_code
= alpha_fp_emul_imprecise(regs
, write_mask
);
223 die_if_kernel("Arithmetic fault", regs
, 0, NULL
);
225 info
.si_signo
= SIGFPE
;
227 info
.si_code
= si_code
;
228 info
.si_addr
= (void __user
*) regs
->pc
;
229 send_sig_info(SIGFPE
, &info
, current
);
233 do_entIF(unsigned long type
, struct pt_regs
*regs
)
238 if ((regs
->ps
& ~IPL_MAX
) == 0) {
240 const unsigned int *data
241 = (const unsigned int *) regs
->pc
;
242 printk("Kernel bug at %s:%d\n",
243 (const char *)(data
[1] | (long)data
[2] << 32),
246 #ifdef CONFIG_ALPHA_WTINT
248 /* If CALL_PAL WTINT is totally unsupported by the
249 PALcode, e.g. MILO, "emulate" it by overwriting
252 = (unsigned int *) regs
->pc
- 1;
253 if (*pinsn
== PAL_wtint
) {
254 *pinsn
= 0x47e01400; /* mov 0,$0 */
260 #endif /* ALPHA_WTINT */
261 die_if_kernel((type
== 1 ? "Kernel Bug" : "Instruction fault"),
266 case 0: /* breakpoint */
267 info
.si_signo
= SIGTRAP
;
269 info
.si_code
= TRAP_BRKPT
;
271 info
.si_addr
= (void __user
*) regs
->pc
;
273 if (ptrace_cancel_bpt(current
)) {
274 regs
->pc
-= 4; /* make pc point to former bpt */
277 send_sig_info(SIGTRAP
, &info
, current
);
280 case 1: /* bugcheck */
281 info
.si_signo
= SIGTRAP
;
283 info
.si_code
= TRAP_FIXME
;
284 info
.si_addr
= (void __user
*) regs
->pc
;
286 send_sig_info(SIGTRAP
, &info
, current
);
289 case 2: /* gentrap */
290 info
.si_addr
= (void __user
*) regs
->pc
;
291 info
.si_trapno
= regs
->r16
;
292 switch ((long) regs
->r16
) {
349 info
.si_signo
= signo
;
352 info
.si_addr
= (void __user
*) regs
->pc
;
353 send_sig_info(signo
, &info
, current
);
357 if (implver() == IMPLVER_EV4
) {
360 /* The some versions of SRM do not handle
361 the opDEC properly - they return the PC of the
362 opDEC fault, not the instruction after as the
363 Alpha architecture requires. Here we fix it up.
364 We do this by intentionally causing an opDEC
365 fault during the boot sequence and testing if
366 we get the correct PC. If not, we set a flag
367 to correct it every time through. */
368 regs
->pc
+= opDEC_fix
;
370 /* EV4 does not implement anything except normal
371 rounding. Everything else will come here as
372 an illegal instruction. Emulate them. */
373 si_code
= alpha_fp_emul(regs
->pc
- 4);
377 info
.si_signo
= SIGFPE
;
379 info
.si_code
= si_code
;
380 info
.si_addr
= (void __user
*) regs
->pc
;
381 send_sig_info(SIGFPE
, &info
, current
);
387 case 3: /* FEN fault */
388 /* Irritating users can call PAL_clrfen to disable the
389 FPU for the process. The kernel will then trap in
390 do_switch_stack and undo_switch_stack when we try
391 to save and restore the FP registers.
393 Given that GCC by default generates code that uses the
394 FP registers, PAL_clrfen is not useful except for DoS
395 attacks. So turn the bleeding FPU back on and be done
397 current_thread_info()->pcb
.flags
|= 1;
398 __reload_thread(¤t_thread_info()->pcb
);
402 default: /* unexpected instruction-fault type */
406 info
.si_signo
= SIGILL
;
408 info
.si_code
= ILL_ILLOPC
;
409 info
.si_addr
= (void __user
*) regs
->pc
;
410 send_sig_info(SIGILL
, &info
, current
);
413 /* There is an ifdef in the PALcode in MILO that enables a
414 "kernel debugging entry point" as an unprivileged call_pal.
416 We don't want to have anything to do with it, but unfortunately
417 several versions of MILO included in distributions have it enabled,
418 and if we don't put something on the entry point we'll oops. */
421 do_entDbg(struct pt_regs
*regs
)
425 die_if_kernel("Instruction fault", regs
, 0, NULL
);
427 info
.si_signo
= SIGILL
;
429 info
.si_code
= ILL_ILLOPC
;
430 info
.si_addr
= (void __user
*) regs
->pc
;
431 force_sig_info(SIGILL
, &info
, current
);
/*
 * entUna has a different register layout to be reasonably simple. It
 * needs access to all the integer registers (the kernel doesn't use
 * fp-regs), and it needs to have them in order for simpler access.
 *
 * Due to the non-standard register layout (and because we don't want
 * to handle floating-point regs), user-mode unaligned accesses are
 * handled separately by do_entUnaUser below.
 *
 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
 * on a gp-register unaligned load/store, something is _very_ wrong
 * in the kernel anyway..
 */
struct allregs {
	unsigned long regs[32];
	unsigned long ps, pc, gp, a0, a1, a2;
};

/* Counters and last-fault info, [0] = kernel faults, [1] = user faults. */
struct unaligned_stat {
	unsigned long count, va, pc;
} unaligned[2];


/* Macro for exception fixup code to access integer registers.
   Registers 16-18 (a0-a2) are remapped to the a0/a1/a2 slots that
   follow regs[] in struct allregs: (r)+19 lands on index 35/36/37.  */
#define una_reg(r)  (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
463 do_entUna(void * va
, unsigned long opcode
, unsigned long reg
,
464 struct allregs
*regs
)
466 long error
, tmp1
, tmp2
, tmp3
, tmp4
;
467 unsigned long pc
= regs
->pc
- 4;
468 unsigned long *_regs
= regs
->regs
;
469 const struct exception_table_entry
*fixup
;
471 unaligned
[0].count
++;
472 unaligned
[0].va
= (unsigned long) va
;
473 unaligned
[0].pc
= pc
;
475 /* We don't want to use the generic get/put unaligned macros as
476 we want to trap exceptions. Only if we actually get an
477 exception will we decide whether we should have caught it. */
480 case 0x0c: /* ldwu */
481 __asm__
__volatile__(
482 "1: ldq_u %1,0(%3)\n"
483 "2: ldq_u %2,1(%3)\n"
489 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
493 una_reg(reg
) = tmp1
|tmp2
;
497 __asm__
__volatile__(
498 "1: ldq_u %1,0(%3)\n"
499 "2: ldq_u %2,3(%3)\n"
505 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
509 una_reg(reg
) = (int)(tmp1
|tmp2
);
513 __asm__
__volatile__(
514 "1: ldq_u %1,0(%3)\n"
515 "2: ldq_u %2,7(%3)\n"
521 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
525 una_reg(reg
) = tmp1
|tmp2
;
528 /* Note that the store sequences do not indicate that they change
529 memory because it _should_ be affecting nothing in this context.
530 (Otherwise we have other, much larger, problems.) */
532 __asm__
__volatile__(
533 "1: ldq_u %2,1(%5)\n"
534 "2: ldq_u %1,0(%5)\n"
541 "3: stq_u %2,1(%5)\n"
542 "4: stq_u %1,0(%5)\n"
548 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
549 "=&r"(tmp3
), "=&r"(tmp4
)
550 : "r"(va
), "r"(una_reg(reg
)), "0"(0));
556 __asm__
__volatile__(
557 "1: ldq_u %2,3(%5)\n"
558 "2: ldq_u %1,0(%5)\n"
565 "3: stq_u %2,3(%5)\n"
566 "4: stq_u %1,0(%5)\n"
572 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
573 "=&r"(tmp3
), "=&r"(tmp4
)
574 : "r"(va
), "r"(una_reg(reg
)), "0"(0));
580 __asm__
__volatile__(
581 "1: ldq_u %2,7(%5)\n"
582 "2: ldq_u %1,0(%5)\n"
589 "3: stq_u %2,7(%5)\n"
590 "4: stq_u %1,0(%5)\n"
596 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
597 "=&r"(tmp3
), "=&r"(tmp4
)
598 : "r"(va
), "r"(una_reg(reg
)), "0"(0));
604 printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
605 pc
, va
, opcode
, reg
);
609 /* Ok, we caught the exception, but we don't want it. Is there
610 someone to pass it along to? */
611 if ((fixup
= search_exception_tables(pc
)) != 0) {
613 newpc
= fixup_exception(una_reg
, fixup
, pc
);
615 printk("Forwarding unaligned exception at %lx (%lx)\n",
623 * Yikes! No one to forward the exception to.
624 * Since the registers are in a weird format, dump them ourselves.
627 printk("%s(%d): unhandled unaligned exception\n",
628 current
->comm
, task_pid_nr(current
));
630 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n",
631 pc
, una_reg(26), regs
->ps
);
632 printk("r0 = %016lx r1 = %016lx r2 = %016lx\n",
633 una_reg(0), una_reg(1), una_reg(2));
634 printk("r3 = %016lx r4 = %016lx r5 = %016lx\n",
635 una_reg(3), una_reg(4), una_reg(5));
636 printk("r6 = %016lx r7 = %016lx r8 = %016lx\n",
637 una_reg(6), una_reg(7), una_reg(8));
638 printk("r9 = %016lx r10= %016lx r11= %016lx\n",
639 una_reg(9), una_reg(10), una_reg(11));
640 printk("r12= %016lx r13= %016lx r14= %016lx\n",
641 una_reg(12), una_reg(13), una_reg(14));
642 printk("r15= %016lx\n", una_reg(15));
643 printk("r16= %016lx r17= %016lx r18= %016lx\n",
644 una_reg(16), una_reg(17), una_reg(18));
645 printk("r19= %016lx r20= %016lx r21= %016lx\n",
646 una_reg(19), una_reg(20), una_reg(21));
647 printk("r22= %016lx r23= %016lx r24= %016lx\n",
648 una_reg(22), una_reg(23), una_reg(24));
649 printk("r25= %016lx r27= %016lx r28= %016lx\n",
650 una_reg(25), una_reg(27), una_reg(28));
651 printk("gp = %016lx sp = %p\n", regs
->gp
, regs
+1);
653 dik_show_code((unsigned int *)pc
);
654 dik_show_trace((unsigned long *)(regs
+1));
656 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL
)) {
657 printk("die_if_kernel recursion detected.\n");
/*
 * Convert an s-floating point value in memory format to the
 * corresponding value in register format.  The exponent
 * needs to be remapped to preserve non-finite values
 * (infinities, not-a-numbers, denormals).
 */
static inline unsigned long
s_mem_to_reg (unsigned long s_mem)
{
	unsigned long frac    = (s_mem >>  0) & 0x7fffff;
	unsigned long sign    = (s_mem >> 31) & 0x1;
	unsigned long exp_msb = (s_mem >> 30) & 0x1;
	unsigned long exp_low = (s_mem >> 23) & 0x7f;
	unsigned long exp;

	exp = (exp_msb << 10) | exp_low;	/* common case */

	/* Remap the exponent to widen the S-format (8-bit) exponent into
	   the T-format (11-bit) one while preserving special values. */
	if (exp_msb) {
		if (exp_low == 0x7f)
			exp = 0x7ff;		/* Inf / NaN */
	} else {
		if (exp_low == 0x00)
			exp = 0x000;		/* zero / denormal */
		else
			exp |= (0x7 << 7);	/* re-bias normal exponent */
	}
	return (sign << 63) | (exp << 52) | (frac << 29);
}
/*
 * Convert an s-floating point value in register format to the
 * corresponding value in memory format.
 */
static inline unsigned long
s_reg_to_mem (unsigned long s_reg)
{
	unsigned long sign_and_expmsb = (s_reg >> 62) << 30;
	unsigned long explow_and_frac = (s_reg << 5) >> 34;

	return sign_and_expmsb | explow_and_frac;
}
705 * Handle user-level unaligned fault. Handling user-level unaligned
706 * faults is *extremely* slow and produces nasty messages. A user
707 * program *should* fix unaligned faults ASAP.
709 * Notice that we have (almost) the regular kernel stack layout here,
710 * so finding the appropriate registers is a little more difficult
711 * than in the kernel case.
713 * Finally, we handle regular integer load/stores only. In
714 * particular, load-linked/store-conditionally and floating point
715 * load/stores are not supported. The former make no sense with
716 * unaligned faults (they are guaranteed to fail) and I don't think
717 * the latter will occur in any decent program.
 * Sigh.  We *do* have to handle some FP operations, because GCC will
 * use them as temporary storage for integer memory-to-memory copies.
 * However, we need to deal with stt/ldt and sts/lds only.
/* Bitmask over instruction opcodes: integer load/store opcodes the
   user-mode unaligned handler treats as plain integer accesses.  */
#define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c   /* ldl stl */	\
			| 1L << 0x29 | 1L << 0x2d   /* ldq stq */	\
			| 1L << 0x0c | 1L << 0x0d   /* ldwu stw */	\
			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */

/* Bitmask over instruction opcodes: all store opcodes (FP and integer). */
#define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27   /* sts stt */	\
			| 1L << 0x2c | 1L << 0x2d   /* stl stq */	\
			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */
733 #define R(x) ((size_t) &((struct pt_regs *)0)->x)
735 static int unauser_reg_offsets
[32] = {
736 R(r0
), R(r1
), R(r2
), R(r3
), R(r4
), R(r5
), R(r6
), R(r7
), R(r8
),
737 /* r9 ... r15 are stored in front of regs. */
738 -56, -48, -40, -32, -24, -16, -8,
739 R(r16
), R(r17
), R(r18
),
740 R(r19
), R(r20
), R(r21
), R(r22
), R(r23
), R(r24
), R(r25
), R(r26
),
741 R(r27
), R(r28
), R(gp
),
748 do_entUnaUser(void __user
* va
, unsigned long opcode
,
749 unsigned long reg
, struct pt_regs
*regs
)
751 static DEFINE_RATELIMIT_STATE(ratelimit
, 5 * HZ
, 5);
753 unsigned long tmp1
, tmp2
, tmp3
, tmp4
;
754 unsigned long fake_reg
, *reg_addr
= &fake_reg
;
758 /* Check the UAC bits to decide what the user wants us to do
759 with the unaliged access. */
761 if (!(current_thread_info()->status
& TS_UAC_NOPRINT
)) {
762 if (__ratelimit(&ratelimit
)) {
763 printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
764 current
->comm
, task_pid_nr(current
),
765 regs
->pc
- 4, va
, opcode
, reg
);
768 if ((current_thread_info()->status
& TS_UAC_SIGBUS
))
770 /* Not sure why you'd want to use this, but... */
771 if ((current_thread_info()->status
& TS_UAC_NOFIX
))
774 /* Don't bother reading ds in the access check since we already
775 know that this came from the user. Also rely on the fact that
776 the page at TASK_SIZE is unmapped and so can't be touched anyway. */
777 if ((unsigned long)va
>= TASK_SIZE
)
780 ++unaligned
[1].count
;
781 unaligned
[1].va
= (unsigned long)va
;
782 unaligned
[1].pc
= regs
->pc
- 4;
784 if ((1L << opcode
) & OP_INT_MASK
) {
785 /* it's an integer load/store */
787 reg_addr
= (unsigned long *)
788 ((char *)regs
+ unauser_reg_offsets
[reg
]);
789 } else if (reg
== 30) {
790 /* usp in PAL regs */
793 /* zero "register" */
798 /* We don't want to use the generic get/put unaligned macros as
799 we want to trap exceptions. Only if we actually get an
800 exception will we decide whether we should have caught it. */
803 case 0x0c: /* ldwu */
804 __asm__
__volatile__(
805 "1: ldq_u %1,0(%3)\n"
806 "2: ldq_u %2,1(%3)\n"
812 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
816 *reg_addr
= tmp1
|tmp2
;
820 __asm__
__volatile__(
821 "1: ldq_u %1,0(%3)\n"
822 "2: ldq_u %2,3(%3)\n"
828 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
832 alpha_write_fp_reg(reg
, s_mem_to_reg((int)(tmp1
|tmp2
)));
836 __asm__
__volatile__(
837 "1: ldq_u %1,0(%3)\n"
838 "2: ldq_u %2,7(%3)\n"
844 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
848 alpha_write_fp_reg(reg
, tmp1
|tmp2
);
852 __asm__
__volatile__(
853 "1: ldq_u %1,0(%3)\n"
854 "2: ldq_u %2,3(%3)\n"
860 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
864 *reg_addr
= (int)(tmp1
|tmp2
);
868 __asm__
__volatile__(
869 "1: ldq_u %1,0(%3)\n"
870 "2: ldq_u %2,7(%3)\n"
876 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
880 *reg_addr
= tmp1
|tmp2
;
883 /* Note that the store sequences do not indicate that they change
884 memory because it _should_ be affecting nothing in this context.
885 (Otherwise we have other, much larger, problems.) */
887 __asm__
__volatile__(
888 "1: ldq_u %2,1(%5)\n"
889 "2: ldq_u %1,0(%5)\n"
896 "3: stq_u %2,1(%5)\n"
897 "4: stq_u %1,0(%5)\n"
903 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
904 "=&r"(tmp3
), "=&r"(tmp4
)
905 : "r"(va
), "r"(*reg_addr
), "0"(0));
911 fake_reg
= s_reg_to_mem(alpha_read_fp_reg(reg
));
915 __asm__
__volatile__(
916 "1: ldq_u %2,3(%5)\n"
917 "2: ldq_u %1,0(%5)\n"
924 "3: stq_u %2,3(%5)\n"
925 "4: stq_u %1,0(%5)\n"
931 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
932 "=&r"(tmp3
), "=&r"(tmp4
)
933 : "r"(va
), "r"(*reg_addr
), "0"(0));
939 fake_reg
= alpha_read_fp_reg(reg
);
943 __asm__
__volatile__(
944 "1: ldq_u %2,7(%5)\n"
945 "2: ldq_u %1,0(%5)\n"
952 "3: stq_u %2,7(%5)\n"
953 "4: stq_u %1,0(%5)\n"
959 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
960 "=&r"(tmp3
), "=&r"(tmp4
)
961 : "r"(va
), "r"(*reg_addr
), "0"(0));
967 /* What instruction were you trying to use, exactly? */
971 /* Only integer loads should get here; everyone else returns early. */
977 regs
->pc
-= 4; /* make pc point to faulting insn */
978 info
.si_signo
= SIGSEGV
;
981 /* We need to replicate some of the logic in mm/fault.c,
982 since we don't have access to the fault code in the
983 exception handling return path. */
984 if ((unsigned long)va
>= TASK_SIZE
)
985 info
.si_code
= SEGV_ACCERR
;
987 struct mm_struct
*mm
= current
->mm
;
988 down_read(&mm
->mmap_sem
);
989 if (find_vma(mm
, (unsigned long)va
))
990 info
.si_code
= SEGV_ACCERR
;
992 info
.si_code
= SEGV_MAPERR
;
993 up_read(&mm
->mmap_sem
);
996 send_sig_info(SIGSEGV
, &info
, current
);
1001 info
.si_signo
= SIGBUS
;
1003 info
.si_code
= BUS_ADRALN
;
1005 send_sig_info(SIGBUS
, &info
, current
);
1012 /* Tell PAL-code what global pointer we want in the kernel. */
1013 register unsigned long gptr
__asm__("$29");
1016 /* Hack for Multia (UDB) and JENSEN: some of their SRMs have
1017 a bug in the handling of the opDEC fault. Fix it up if so. */
1018 if (implver() == IMPLVER_EV4
)