/*
 * Meta exception handling.
 * (arch/metag/kernel/traps.c — mirror_ubuntu-artful-kernel,
 *  blob 5ce67f9124aa5ddfcac2740f6b67af83fe0882b1)
 *
 * Copyright (C) 2005,2006,2007,2008,2009,2012 Imagination Technologies Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */
11 #include <linux/export.h>
12 #include <linux/sched.h>
13 #include <linux/sched/debug.h>
14 #include <linux/signal.h>
15 #include <linux/kernel.h>
17 #include <linux/types.h>
18 #include <linux/init.h>
19 #include <linux/interrupt.h>
20 #include <linux/preempt.h>
21 #include <linux/ptrace.h>
22 #include <linux/module.h>
23 #include <linux/kallsyms.h>
24 #include <linux/kdebug.h>
25 #include <linux/kexec.h>
26 #include <linux/unistd.h>
27 #include <linux/smp.h>
28 #include <linux/slab.h>
29 #include <linux/syscalls.h>
32 #include <asm/core_reg.h>
33 #include <asm/irqflags.h>
34 #include <asm/siginfo.h>
35 #include <asm/traps.h>
36 #include <asm/hwthread.h>
37 #include <asm/setup.h>
38 #include <asm/switch.h>
39 #include <asm/user_gateway.h>
40 #include <asm/syscall.h>
41 #include <asm/syscalls.h>
43 /* Passing syscall arguments as long long is quicker. */
44 typedef unsigned int (*LPSYSCALL
) (unsigned long long,
49 * Users of LNKSET should compare the bus error bits obtained from DEFR
50 * against TXDEFR_LNKSET_SUCCESS only as the failure code will vary between
51 * different cores revisions.
53 #define TXDEFR_LNKSET_SUCCESS 0x02000000
54 #define TXDEFR_LNKSET_FAILURE 0x04000000
57 * Our global TBI handle. Initialised from setup.c/setup_arch.
59 DECLARE_PER_CPU(PTBI
, pTBI
);
62 static DEFINE_PER_CPU(unsigned int, trigger_mask
);
64 unsigned int global_trigger_mask
;
65 EXPORT_SYMBOL(global_trigger_mask
);
68 unsigned long per_cpu__stack_save
[NR_CPUS
];
/* Human-readable names for the TBIXXF_SIGNUM_* hardware trap numbers,
 * indexed by signal number and consumed by trap_name() below.
 * NOTE(review): lossy scrape — the closing "};" of this array is not
 * visible here; text below is preserved verbatim. */
70 static const char * const trap_names
[] = {
71 [TBIXXF_SIGNUM_IIF
] = "Illegal instruction fault",
72 [TBIXXF_SIGNUM_PGF
] = "Privilege violation",
73 [TBIXXF_SIGNUM_DHF
] = "Unaligned data access fault",
74 [TBIXXF_SIGNUM_IGF
] = "Code fetch general read failure",
75 [TBIXXF_SIGNUM_DGF
] = "Data access general read/write fault",
76 [TBIXXF_SIGNUM_IPF
] = "Code fetch page fault",
77 [TBIXXF_SIGNUM_DPF
] = "Data access page fault",
78 [TBIXXF_SIGNUM_IHF
] = "Instruction breakpoint",
79 [TBIXXF_SIGNUM_DWF
] = "Read-only data access fault",
82 const char *trap_name(int trapno
)
84 if (trapno
>= 0 && trapno
< ARRAY_SIZE(trap_names
)
85 && trap_names
[trapno
])
86 return trap_names
[trapno
];
87 return "Unknown fault";
90 static DEFINE_SPINLOCK(die_lock
);
/* die() - report a fatal kernel fault and never return.
 * Serialises oops output with die_lock, prints the trap name/address and
 * the current task's identity, taints the kernel, and may crash-kexec or
 * panic depending on context.
 * NOTE(review): lossy scrape — several original statements (register
 * dump, bust_spinlocks, final exit path) are missing from this view;
 * the surviving text is preserved verbatim below. */
92 void __noreturn
die(const char *str
, struct pt_regs
*regs
,
93 long err
, unsigned long addr
)
95 static int die_counter
;
99 spin_lock_irq(&die_lock
);
102 pr_err("%s: err %04lx (%s) addr %08lx [#%d]\n", str
, err
& 0xffff,
103 trap_name(err
& 0xffff), addr
, ++die_counter
);
108 pr_err("Process: %s (pid: %d, stack limit = %p)\n", current
->comm
,
109 task_pid_nr(current
), task_stack_page(current
) + THREAD_SIZE
);
112 add_taint(TAINT_DIE
, LOCKDEP_NOW_UNRELIABLE
);
113 if (kexec_should_crash(current
))
117 panic("Fatal exception in interrupt");
120 panic("Fatal exception");
122 spin_unlock_irq(&die_lock
);
127 #ifdef CONFIG_METAG_DSP
129 * The ECH encoding specifies the size of a DSPRAM as,
133 * A "slot" is the size of two DSPRAM bank entries; an entry from
134 * DSPRAM bank A and an entry from DSPRAM bank B. One DSPRAM bank
138 static inline unsigned int decode_dspram_size(unsigned int size
)
140 unsigned int _sz
= size
& 0x7f;
142 return _sz
* SLOT_SZ
* 4;
/* dspram_save() - snapshot the in-use DSPRAM banks A and B into the
 * task's meta_ext_context, (re)allocating the save buffers on demand and
 * panicking on allocation failure (context would otherwise be lost).
 * NOTE(review): lossy scrape — some declarations/statements are missing
 * from this view; surviving text preserved verbatim below. */
145 static void dspram_save(struct meta_ext_context
*dsp_ctx
,
146 unsigned int ramA_sz
, unsigned int ramB_sz
)
148 unsigned int ram_sz
[2];
154 for (i
= 0; i
< 2; i
++) {
155 if (ram_sz
[i
] != 0) {
159 sz
= decode_dspram_size(ram_sz
[i
] >> 8);
161 sz
= decode_dspram_size(ram_sz
[i
]);
163 if (dsp_ctx
->ram
[i
] == NULL
) {
164 dsp_ctx
->ram
[i
] = kmalloc(sz
, GFP_KERNEL
);
166 if (dsp_ctx
->ram
[i
] == NULL
)
167 panic("couldn't save DSP context");
169 if (ram_sz
[i
] > dsp_ctx
->ram_sz
[i
]) {
170 kfree(dsp_ctx
->ram
[i
]);
172 dsp_ctx
->ram
[i
] = kmalloc(sz
,
175 if (dsp_ctx
->ram
[i
] == NULL
)
176 panic("couldn't save DSP context");
181 __TBIDspramSaveA(ram_sz
[i
], dsp_ctx
->ram
[i
]);
183 __TBIDspramSaveB(ram_sz
[i
], dsp_ctx
->ram
[i
]);
185 dsp_ctx
->ram_sz
[i
] = ram_sz
[i
];
189 #endif /* CONFIG_METAG_DSP */
192 * Allow interrupts to be nested and save any "extended" register
193 * context state, e.g. DSP regs and RAMs.
/* nest_interrupts() - allow interrupt nesting via __TBINestInts(),
 * first saving any extended (DSP) register/RAM state advertised by the
 * D0.8 ECH encoding when we interrupted user mode.  The kernel itself
 * never uses the DSP, so kernel-mode nesting skips the save.
 * NOTE(review): lossy scrape — declarations and some structure are
 * missing from this view; surviving text preserved verbatim below. */
195 static void nest_interrupts(TBIRES State
, unsigned long mask
)
197 #ifdef CONFIG_METAG_DSP
198 struct meta_ext_context
*dsp_ctx
;
202 * D0.8 may contain an ECH encoding. The upper 16 bits
203 * tell us what DSP resources the current process is
204 * using. OR the bits into the SaveMask so that
205 * __TBINestInts() knows what resources to save as
206 * part of this context.
208 * Don't save the context if we're nesting interrupts in the
209 * kernel because the kernel doesn't use DSP hardware.
211 D0_8
= __core_reg_get(D0
.8
);
213 if (D0_8
&& (State
.Sig
.SaveMask
& TBICTX_PRIV_BIT
)) {
214 State
.Sig
.SaveMask
|= (D0_8
>> 16);
216 dsp_ctx
= current
->thread
.dsp_context
;
217 if (dsp_ctx
== NULL
) {
218 dsp_ctx
= kzalloc(sizeof(*dsp_ctx
), GFP_KERNEL
);
220 panic("couldn't save DSP context: ENOMEM");
222 current
->thread
.dsp_context
= dsp_ctx
;
225 current
->thread
.user_flags
|= (D0_8
& 0xffff0000);
226 __TBINestInts(State
, &dsp_ctx
->regs
, mask
);
227 dspram_save(dsp_ctx
, D0_8
& 0x7f00, D0_8
& 0x007f);
229 __TBINestInts(State
, NULL
, mask
);
231 __TBINestInts(State
, NULL
, mask
);
/* head_end() - common entry work for every TBI signal handler: record
 * the save masks, force catch-buffer/RD-pipe saving, preserve LNKSET
 * bus-error state per interrupt depth (Meta 2.1), then nest interrupts.
 * NOTE(review): lossy scrape — braces, blank lines and some statements
 * are missing from this view; surviving text preserved verbatim below. */
235 void head_end(TBIRES State
, unsigned long mask
)
237 unsigned int savemask
= (unsigned short)State
.Sig
.SaveMask
;
238 unsigned int ctx_savemask
= (unsigned short)State
.Sig
.pCtx
->SaveMask
;
240 if (savemask
& TBICTX_PRIV_BIT
) {
241 ctx_savemask
|= TBICTX_PRIV_BIT
;
242 current
->thread
.user_flags
= savemask
;
245 /* Always undo the sleep bit */
246 ctx_savemask
&= ~TBICTX_WAIT_BIT
;
248 /* Always save the catch buffer and RD pipe if they are dirty */
249 savemask
|= TBICTX_XCBF_BIT
;
251 /* Only save the catch and RD if we have not already done so.
252 * Note - the RD bits are in the pCtx only, and not in the
255 if ((savemask
& TBICTX_CBUF_BIT
) ||
256 (ctx_savemask
& TBICTX_CBRP_BIT
)) {
257 /* Have we already saved the buffers though?
258 * - See TestTrack 5071 */
259 if (ctx_savemask
& TBICTX_XCBF_BIT
) {
260 /* Strip off the bits so the call to __TBINestInts
261 * won't save the buffers again. */
262 savemask
&= ~TBICTX_CBUF_BIT
;
263 ctx_savemask
&= ~TBICTX_CBRP_BIT
;
267 #ifdef CONFIG_METAG_META21
269 unsigned int depth
, txdefr
;
274 * The process may have been interrupted after a LNKSET, but
275 * before it could read the DEFR state, so we mustn't lose that
276 * state or it could end up retrying an atomic operation that
279 * All interrupts are disabled at this point so we
280 * don't need to perform any locking. We must do this
281 * dance before we use LNKGET or LNKSET.
283 BUG_ON(current
->thread
.int_depth
> HARDIRQ_BITS
);
285 depth
= current
->thread
.int_depth
++;
287 txdefr
= __core_reg_get(TXDEFR
);
289 txdefr
&= TXDEFR_BUS_STATE_BITS
;
290 if (txdefr
& TXDEFR_LNKSET_SUCCESS
)
291 current
->thread
.txdefr_failure
&= ~(1 << depth
);
293 current
->thread
.txdefr_failure
|= (1 << depth
);
297 State
.Sig
.SaveMask
= savemask
;
298 State
.Sig
.pCtx
->SaveMask
= ctx_savemask
;
300 nest_interrupts(State
, mask
);
302 #ifdef CONFIG_METAG_POISON_CATCH_BUFFERS
303 /* Poison the catch registers. This shows up any mistakes we have
304 * made in their handling MUCH quicker.
306 __core_reg_set(TXCATCH0
, 0x87650021);
307 __core_reg_set(TXCATCH1
, 0x87654322);
308 __core_reg_set(TXCATCH2
, 0x87654323);
309 __core_reg_set(TXCATCH3
, 0x87654324);
310 #endif /* CONFIG_METAG_POISON_CATCH_BUFFERS */
/* tail_end_sys() - common exit work before returning from a TBI handler
 * (syscall-aware variant): run pending work for user mode, restore FPU
 * and DSP extended context, and restore per-depth LNKSET success/failure
 * state into TXDEFR (Meta 2.1).
 * NOTE(review): lossy scrape — braces, blank lines and some statements
 * (including the final return) are missing from this view; surviving
 * text preserved verbatim below. */
313 TBIRES
tail_end_sys(TBIRES State
, int syscall
, int *restart
)
315 struct pt_regs
*regs
= (struct pt_regs
*)State
.Sig
.pCtx
;
320 if (user_mode(regs
)) {
321 flags
= current_thread_info()->flags
;
322 if (flags
& _TIF_WORK_MASK
&&
323 do_work_pending(regs
, flags
, syscall
)) {
328 #ifdef CONFIG_METAG_FPU
329 if (current
->thread
.fpu_context
&&
330 current
->thread
.fpu_context
->needs_restore
) {
331 __TBICtxFPURestore(State
, current
->thread
.fpu_context
);
333 * Clearing this bit ensures the FP unit is not made
334 * active again unless it is used.
336 State
.Sig
.SaveMask
&= ~TBICTX_FPAC_BIT
;
337 current
->thread
.fpu_context
->needs_restore
= false;
339 State
.Sig
.TrigMask
|= TBI_TRIG_BIT(TBID_SIGNUM_DFR
);
343 /* TBI will turn interrupts back on at some point. */
344 if (!irqs_disabled_flags((unsigned long)State
.Sig
.TrigMask
))
347 #ifdef CONFIG_METAG_DSP
349 * If we previously saved an extended context then restore it
350 * now. Otherwise, clear D0.8 because this process is not
351 * using DSP hardware.
353 if (State
.Sig
.pCtx
->SaveMask
& TBICTX_XEXT_BIT
) {
355 struct meta_ext_context
*dsp_ctx
= current
->thread
.dsp_context
;
357 /* Make sure we're going to return to userland. */
358 BUG_ON(current
->thread
.int_depth
!= 1);
360 if (dsp_ctx
->ram_sz
[0] > 0)
361 __TBIDspramRestoreA(dsp_ctx
->ram_sz
[0],
363 if (dsp_ctx
->ram_sz
[1] > 0)
364 __TBIDspramRestoreB(dsp_ctx
->ram_sz
[1],
367 State
.Sig
.SaveMask
|= State
.Sig
.pCtx
->SaveMask
;
368 __TBICtxRestore(State
, current
->thread
.dsp_context
);
369 D0_8
= __core_reg_get(D0
.8
);
370 D0_8
|= current
->thread
.user_flags
& 0xffff0000;
371 D0_8
|= (dsp_ctx
->ram_sz
[1] | dsp_ctx
->ram_sz
[0]) & 0xffff;
372 __core_reg_set(D0
.8
, D0_8
);
374 __core_reg_set(D0
.8
, 0);
375 #endif /* CONFIG_METAG_DSP */
377 #ifdef CONFIG_METAG_META21
379 unsigned int depth
, txdefr
;
382 * If there hasn't been a LNKSET since the last LNKGET then the
383 * link flag will be set, causing the next LNKSET to succeed if
384 * the addresses match. The two LNK operations may not be a pair
385 * (e.g. see atomic_read()), so the LNKSET should fail.
386 * We use a conditional-never LNKSET to clear the link flag
387 * without side effects.
389 asm volatile("LNKSETDNV [D0Re0],D0Re0");
391 depth
= --current
->thread
.int_depth
;
393 BUG_ON(user_mode(regs
) && depth
);
395 txdefr
= __core_reg_get(TXDEFR
);
397 txdefr
&= ~TXDEFR_BUS_STATE_BITS
;
399 /* Do we need to restore a failure code into TXDEFR? */
400 if (current
->thread
.txdefr_failure
& (1 << depth
))
401 txdefr
|= (TXDEFR_LNKSET_FAILURE
| TXDEFR_BUS_TRIG_BIT
);
403 txdefr
|= (TXDEFR_LNKSET_SUCCESS
| TXDEFR_BUS_TRIG_BIT
);
405 __core_reg_set(TXDEFR
, txdefr
);
/*
 * If we took an interrupt in the middle of __kuser_get_tls then we need
 * to rewind the PC to the start of the function in case the process
 * gets migrated to another thread (SMP only) and it reads the wrong tls
 * data.
 */
418 static inline void _restart_critical_section(TBIRES State
)
420 unsigned long get_tls_start
;
421 unsigned long get_tls_end
;
423 get_tls_start
= (unsigned long)__kuser_get_tls
-
424 (unsigned long)&__user_gateway_start
;
426 get_tls_start
+= USER_GATEWAY_PAGE
;
428 get_tls_end
= (unsigned long)__kuser_get_tls_end
-
429 (unsigned long)&__user_gateway_start
;
431 get_tls_end
+= USER_GATEWAY_PAGE
;
433 if ((State
.Sig
.pCtx
->CurrPC
>= get_tls_start
) &&
434 (State
.Sig
.pCtx
->CurrPC
< get_tls_end
))
435 State
.Sig
.pCtx
->CurrPC
= get_tls_start
;
/*
 * If we took an interrupt in the middle of
 * __kuser_cmpxchg then we need to rewind the PC to the
 * start of the function.
 */
443 static inline void _restart_critical_section(TBIRES State
)
445 unsigned long cmpxchg_start
;
446 unsigned long cmpxchg_end
;
448 cmpxchg_start
= (unsigned long)__kuser_cmpxchg
-
449 (unsigned long)&__user_gateway_start
;
451 cmpxchg_start
+= USER_GATEWAY_PAGE
;
453 cmpxchg_end
= (unsigned long)__kuser_cmpxchg_end
-
454 (unsigned long)&__user_gateway_start
;
456 cmpxchg_end
+= USER_GATEWAY_PAGE
;
458 if ((State
.Sig
.pCtx
->CurrPC
>= cmpxchg_start
) &&
459 (State
.Sig
.pCtx
->CurrPC
< cmpxchg_end
))
460 State
.Sig
.pCtx
->CurrPC
= cmpxchg_start
;
464 /* Used by kick_handler() */
465 void restart_critical_section(TBIRES State
)
467 _restart_critical_section(State
);
470 TBIRES
trigger_handler(TBIRES State
, int SigNum
, int Triggers
, int Inst
,
473 head_end(State
, ~INTS_OFF_MASK
);
475 /* If we interrupted user code handle any critical sections. */
476 if (State
.Sig
.SaveMask
& TBICTX_PRIV_BIT
)
477 _restart_critical_section(State
);
479 trace_hardirqs_off();
481 do_IRQ(SigNum
, (struct pt_regs
*)State
.Sig
.pCtx
);
483 return tail_end(State
);
486 static unsigned int load_fault(PTBICTXEXTCB0 pbuf
)
488 return pbuf
->CBFlags
& TXCATCH0_READ_BIT
;
/* Return the faulting data address recorded in the catch buffer.
 * NOTE(review): lossy scrape — the body is not visible here; presumably
 * it returns pbuf->CBAddr, verify against upstream before relying on it. */
491 static unsigned long fault_address(PTBICTXEXTCB0 pbuf
)
/* unhandled_fault() - deliver a signal for a fault no handler resolved:
 * for user mode, optionally rate-limited diagnostics then force_sig_info;
 * for kernel mode, die().
 * NOTE(review): lossy scrape — local declarations and some assignments
 * are missing from this view; surviving text preserved verbatim below. */
496 static void unhandled_fault(struct pt_regs
*regs
, unsigned long addr
,
497 int signo
, int code
, int trapno
)
499 if (user_mode(regs
)) {
502 if (show_unhandled_signals
&& unhandled_signal(current
, signo
)
503 && printk_ratelimit()) {
505 pr_info("pid %d unhandled fault: pc 0x%08x, addr 0x%08lx, trap %d (%s)\n",
506 current
->pid
, regs
->ctx
.CurrPC
, addr
,
507 trapno
, trap_name(trapno
));
508 print_vma_addr(" in ", regs
->ctx
.CurrPC
);
509 print_vma_addr(" rtp in ", regs
->ctx
.DX
[4].U1
);
514 info
.si_signo
= signo
;
517 info
.si_addr
= (__force
void __user
*)addr
;
518 info
.si_trapno
= trapno
;
519 force_sig_info(signo
, &info
, current
);
521 die("Oops", regs
, trapno
, addr
);
/* handle_data_fault() - forward a data-access fault to do_page_fault(),
 * flagging it as a write when the catch buffer says it was not a load.
 * NOTE(review): lossy scrape — the local declaration and return are
 * missing from this view; surviving text preserved verbatim below. */
525 static int handle_data_fault(PTBICTXEXTCB0 pcbuf
, struct pt_regs
*regs
,
526 unsigned int data_address
, int trapno
)
530 ret
= do_page_fault(regs
, data_address
, !load_fault(pcbuf
), trapno
);
535 static unsigned long get_inst_fault_address(struct pt_regs
*regs
)
537 return regs
->ctx
.CurrPC
;
/* fault_handler() - TBI handler for TBID_SIGNUM_XXF faults: defers
 * hardware breakpoints/watchpoints to __TBIUnExpXXX, routes page faults
 * to do_page_fault()/handle_data_fault(), and turns the remaining trap
 * types into SIGILL/SIGBUS/SIGSEGV via unhandled_fault().
 * NOTE(review): lossy scrape — braces, the switch statement header and
 * some argument lines are missing, and ")®s" below is mojibake for
 * ")&regs"; surviving text preserved verbatim below. */
540 TBIRES
fault_handler(TBIRES State
, int SigNum
, int Triggers
,
543 struct pt_regs
*regs
= (struct pt_regs
*)State
.Sig
.pCtx
;
544 PTBICTXEXTCB0 pcbuf
= (PTBICTXEXTCB0
)®s
->extcb0
;
545 unsigned long data_address
;
547 head_end(State
, ~INTS_OFF_MASK
);
549 /* Hardware breakpoint or data watch */
550 if ((SigNum
== TBIXXF_SIGNUM_IHF
) ||
551 ((SigNum
== TBIXXF_SIGNUM_DHF
) &&
552 (pcbuf
[0].CBFlags
& (TXCATCH0_WATCH1_BIT
|
553 TXCATCH0_WATCH0_BIT
)))) {
554 State
= __TBIUnExpXXX(State
, SigNum
, Triggers
, Inst
,
556 return tail_end(State
);
561 data_address
= fault_address(pcbuf
);
564 case TBIXXF_SIGNUM_IGF
:
565 /* 1st-level entry invalid (instruction fetch) */
566 case TBIXXF_SIGNUM_IPF
: {
567 /* 2nd-level entry invalid (instruction fetch) */
568 unsigned long addr
= get_inst_fault_address(regs
);
569 do_page_fault(regs
, addr
, 0, SigNum
);
573 case TBIXXF_SIGNUM_DGF
:
574 /* 1st-level entry invalid (data access) */
575 case TBIXXF_SIGNUM_DPF
:
576 /* 2nd-level entry invalid (data access) */
577 case TBIXXF_SIGNUM_DWF
:
578 /* Write to read only page */
579 handle_data_fault(pcbuf
, regs
, data_address
, SigNum
);
582 case TBIXXF_SIGNUM_IIF
:
583 /* Illegal instruction */
584 unhandled_fault(regs
, regs
->ctx
.CurrPC
, SIGILL
, ILL_ILLOPC
,
588 case TBIXXF_SIGNUM_DHF
:
589 /* Unaligned access */
590 unhandled_fault(regs
, data_address
, SIGBUS
, BUS_ADRALN
,
593 case TBIXXF_SIGNUM_PGF
:
594 /* Privilege violation */
595 unhandled_fault(regs
, data_address
, SIGSEGV
, SEGV_ACCERR
,
603 return tail_end(State
);
606 static bool switch_is_syscall(unsigned int inst
)
608 return inst
== __METAG_SW_ENCODING(SYS
);
611 static bool switch_is_legacy_syscall(unsigned int inst
)
613 return inst
== __METAG_SW_ENCODING(SYS_LEGACY
);
616 static inline void step_over_switch(struct pt_regs
*regs
, unsigned int inst
)
618 regs
->ctx
.CurrPC
+= 4;
621 static inline int test_syscall_work(void)
623 return current_thread_info()->flags
& _TIF_WORK_SYSCALL_MASK
;
/* switch1_handler() - TBI handler for SW1 SWITCH traps, i.e. system
 * calls: validates the SWITCH encoding (unknown switches become
 * SIGTRAP), runs entry/exit tracing, dispatches through sys_call_table
 * using 64-bit argument loads, and supports handlerless restarts.
 * NOTE(review): lossy scrape — braces, labels and some lines are
 * missing, and "*)®s" below is mojibake for "*)&regs"; surviving text
 * preserved verbatim below. */
626 TBIRES
switch1_handler(TBIRES State
, int SigNum
, int Triggers
,
629 struct pt_regs
*regs
= (struct pt_regs
*)State
.Sig
.pCtx
;
630 unsigned int sysnumber
;
631 unsigned long long a1_a2
, a3_a4
, a5_a6
;
632 LPSYSCALL syscall_entry
;
635 head_end(State
, ~INTS_OFF_MASK
);
638 * If this is not a syscall SWITCH it could be a breakpoint.
640 if (!switch_is_syscall(Inst
)) {
642 * Alert the user if they're trying to use legacy system
643 * calls. This suggests they need to update their C
644 * library and build against up to date kernel headers.
646 if (switch_is_legacy_syscall(Inst
))
647 pr_warn_once("WARNING: A legacy syscall was made. Your userland needs updating.\n");
649 * We don't know how to handle the SWITCH and cannot
650 * safely ignore it, so treat all unknown switches
651 * (including breakpoints) as traps.
653 force_sig(SIGTRAP
, current
);
654 return tail_end(State
);
661 sysnumber
= regs
->ctx
.DX
[0].U1
;
663 if (test_syscall_work())
664 sysnumber
= syscall_trace_enter(regs
);
666 /* Skip over the SWITCH instruction - or you just get 'stuck' on it! */
667 step_over_switch(regs
, Inst
);
669 if (sysnumber
>= __NR_syscalls
) {
670 pr_debug("unknown syscall number: %d\n", sysnumber
);
671 syscall_entry
= (LPSYSCALL
) sys_ni_syscall
;
673 syscall_entry
= (LPSYSCALL
) sys_call_table
[sysnumber
];
676 /* Use 64bit loads for speed. */
677 a5_a6
= *(unsigned long long *)®s
->ctx
.DX
[1];
678 a3_a4
= *(unsigned long long *)®s
->ctx
.DX
[2];
679 a1_a2
= *(unsigned long long *)®s
->ctx
.DX
[3];
681 /* here is the actual call to the syscall handler functions */
682 regs
->ctx
.DX
[0].U0
= syscall_entry(a1_a2
, a3_a4
, a5_a6
);
684 if (test_syscall_work())
685 syscall_trace_leave(regs
);
687 State
= tail_end_sys(State
, sysnumber
, &restart
);
688 /* Handlerless restarts shouldn't go via userland */
690 goto restart_syscall
;
/* switchx_handler() - TBI handler for non-syscall SWITCH traps
 * (SW0/SW2/SW3): signals SIGTRAP for user processes, otherwise defers
 * to __TBIUnExpXXX.
 * NOTE(review): lossy scrape — the if/else structure selecting between
 * force_sig() and __TBIUnExpXXX() is missing from this view; surviving
 * text preserved verbatim below. */
694 TBIRES
switchx_handler(TBIRES State
, int SigNum
, int Triggers
,
697 struct pt_regs
*regs
= (struct pt_regs
*)State
.Sig
.pCtx
;
700 * This can be caused by any user process simply executing an unusual
701 * SWITCH instruction. If there's no DA, __TBIUnExpXXX will cause the
702 * thread to stop, so signal a SIGTRAP instead.
704 head_end(State
, ~INTS_OFF_MASK
);
706 force_sig(SIGTRAP
, current
);
708 State
= __TBIUnExpXXX(State
, SigNum
, Triggers
, Inst
, pTBI
);
709 return tail_end(State
);
712 #ifdef CONFIG_METAG_META21
/* fpe_handler() - TBI handler for floating-point exceptions (Meta 2.1):
 * translates the TXSTAT_FPE_* trigger bits into an FPE_FLT* si_code and
 * delivers SIGFPE at the faulting PC.
 * NOTE(review): lossy scrape — the siginfo declaration and some
 * assignments are missing from this view; surviving text preserved
 * verbatim below. */
713 TBIRES
fpe_handler(TBIRES State
, int SigNum
, int Triggers
, int Inst
, PTBI pTBI
)
715 struct pt_regs
*regs
= (struct pt_regs
*)State
.Sig
.pCtx
;
716 unsigned int error_state
= Triggers
;
719 head_end(State
, ~INTS_OFF_MASK
);
723 info
.si_signo
= SIGFPE
;
725 if (error_state
& TXSTAT_FPE_INVALID_BIT
)
726 info
.si_code
= FPE_FLTINV
;
727 else if (error_state
& TXSTAT_FPE_DIVBYZERO_BIT
)
728 info
.si_code
= FPE_FLTDIV
;
729 else if (error_state
& TXSTAT_FPE_OVERFLOW_BIT
)
730 info
.si_code
= FPE_FLTOVF
;
731 else if (error_state
& TXSTAT_FPE_UNDERFLOW_BIT
)
732 info
.si_code
= FPE_FLTUND
;
733 else if (error_state
& TXSTAT_FPE_INEXACT_BIT
)
734 info
.si_code
= FPE_FLTRES
;
738 info
.si_addr
= (__force
void __user
*)regs
->ctx
.CurrPC
;
739 force_sig_info(SIGFPE
, &info
, current
);
741 return tail_end(State
);
745 #ifdef CONFIG_METAG_SUSPEND_MEM
/* Saved copy of the per-thread TBI signal handler table, used across a
 * memory suspend/resume cycle (CONFIG_METAG_SUSPEND_MEM).
 * NOTE(review): lossy scrape — the struct's closing "};" is not visible
 * here; text preserved verbatim below. */
746 struct traps_context
{
747 PTBIAPIFN fnSigs
[TBID_SIGNUM_MAX
+ 1];
750 static struct traps_context
*metag_traps_context
;
/* traps_save_context() - snapshot this CPU's TBI handler table into a
 * GFP_ATOMIC allocation before suspend; returns 0 on success.
 * NOTE(review): lossy scrape — the allocation-failure check and return
 * statements are missing from this view; surviving text preserved
 * verbatim below. */
752 int traps_save_context(void)
754 unsigned long cpu
= smp_processor_id();
755 PTBI _pTBI
= per_cpu(pTBI
, cpu
);
756 struct traps_context
*context
;
758 context
= kzalloc(sizeof(*context
), GFP_ATOMIC
);
762 memcpy(context
->fnSigs
, (void *)_pTBI
->fnSigs
, sizeof(context
->fnSigs
));
764 metag_traps_context
= context
;
/* traps_restore_context() - restore the TBI handler table saved by
 * traps_save_context() after resume and release the saved copy.
 * NOTE(review): lossy scrape — the kfree/return statements are missing
 * from this view; surviving text preserved verbatim below. */
768 int traps_restore_context(void)
770 unsigned long cpu
= smp_processor_id();
771 PTBI _pTBI
= per_cpu(pTBI
, cpu
);
772 struct traps_context
*context
= metag_traps_context
;
774 metag_traps_context
= NULL
;
776 memcpy((void *)_pTBI
->fnSigs
, context
->fnSigs
, sizeof(context
->fnSigs
));
784 static inline unsigned int _get_trigger_mask(void)
786 unsigned long cpu
= smp_processor_id();
787 return per_cpu(trigger_mask
, cpu
);
/* Exported accessor for this CPU's interrupt trigger mask. */
unsigned int get_trigger_mask(void)
{
	return _get_trigger_mask();
}
EXPORT_SYMBOL(get_trigger_mask);
796 static void set_trigger_mask(unsigned int mask
)
798 unsigned long cpu
= smp_processor_id();
799 per_cpu(trigger_mask
, cpu
) = mask
;
/* arch_local_irq_enable() - enable interrupts by restoring this CPU's
 * trigger mask via arch_local_irq_restore().
 * NOTE(review): lossy scrape — the opening preempt_disable() pairing
 * with preempt_enable_no_resched() below is missing from this view;
 * surviving text preserved verbatim below. */
802 void arch_local_irq_enable(void)
805 arch_local_irq_restore(_get_trigger_mask());
806 preempt_enable_no_resched();
808 EXPORT_SYMBOL(arch_local_irq_enable
);
810 static void set_trigger_mask(unsigned int mask
)
812 global_trigger_mask
= mask
;
/* per_cpu_trap_init() - per-CPU trap setup: program the initial trigger
 * mask (interrupts, low-level kick, SW1 syscalls) for this hardware
 * thread, then enter the TBI interrupt context via __TBIASyncTrigger().
 * NOTE(review): lossy scrape — the int_context declaration and braces
 * are missing from this view; surviving text preserved verbatim below. */
816 void per_cpu_trap_init(unsigned long cpu
)
819 unsigned int thread
= cpu_2_hwthread_id
[cpu
];
821 set_trigger_mask(TBI_INTS_INIT(thread
) | /* interrupts */
822 TBI_TRIG_BIT(TBID_SIGNUM_LWK
) | /* low level kick */
823 TBI_TRIG_BIT(TBID_SIGNUM_SW1
));
825 /* non-priv - use current stack */
826 int_context
.Sig
.pCtx
= NULL
;
827 /* Start with interrupts off */
828 int_context
.Sig
.TrigMask
= INTS_OFF_MASK
;
829 int_context
.Sig
.SaveMask
= 0;
831 /* And call __TBIASyncTrigger() */
832 __TBIASyncTrigger(int_context
);
835 void __init
trap_init(void)
837 unsigned long cpu
= smp_processor_id();
838 PTBI _pTBI
= per_cpu(pTBI
, cpu
);
840 _pTBI
->fnSigs
[TBID_SIGNUM_XXF
] = fault_handler
;
841 _pTBI
->fnSigs
[TBID_SIGNUM_SW0
] = switchx_handler
;
842 _pTBI
->fnSigs
[TBID_SIGNUM_SW1
] = switch1_handler
;
843 _pTBI
->fnSigs
[TBID_SIGNUM_SW2
] = switchx_handler
;
844 _pTBI
->fnSigs
[TBID_SIGNUM_SW3
] = switchx_handler
;
845 _pTBI
->fnSigs
[TBID_SIGNUM_LWK
] = kick_handler
;
847 #ifdef CONFIG_METAG_META21
848 _pTBI
->fnSigs
[TBID_SIGNUM_DFR
] = __TBIHandleDFR
;
849 _pTBI
->fnSigs
[TBID_SIGNUM_FPE
] = fpe_handler
;
852 per_cpu_trap_init(cpu
);
855 void tbi_startup_interrupt(int irq
)
857 unsigned long cpu
= smp_processor_id();
858 PTBI _pTBI
= per_cpu(pTBI
, cpu
);
860 BUG_ON(irq
> TBID_SIGNUM_MAX
);
862 /* For TR1 and TR2, the thread id is encoded in the irq number */
863 if (irq
>= TBID_SIGNUM_T10
&& irq
< TBID_SIGNUM_TR3
)
864 cpu
= hwthread_id_2_cpu
[(irq
- TBID_SIGNUM_T10
) % 4];
866 set_trigger_mask(get_trigger_mask() | TBI_TRIG_BIT(irq
));
868 _pTBI
->fnSigs
[irq
] = trigger_handler
;
871 void tbi_shutdown_interrupt(int irq
)
873 unsigned long cpu
= smp_processor_id();
874 PTBI _pTBI
= per_cpu(pTBI
, cpu
);
876 BUG_ON(irq
> TBID_SIGNUM_MAX
);
878 set_trigger_mask(get_trigger_mask() & ~TBI_TRIG_BIT(irq
));
880 _pTBI
->fnSigs
[irq
] = __TBIUnExpXXX
;
/* ret_from_fork() - first code run by a newly forked task: invokes the
 * kthread function for kernel threads, runs syscall-exit tracing, then
 * builds a TBIRES and resumes user mode via __TBIASyncResume(), which
 * must never return.
 * NOTE(review): lossy scrape — declarations, braces and some statements
 * are missing, and "= ®s" below is mojibake for "= &regs"; surviving
 * text preserved verbatim below. */
883 int ret_from_fork(TBIRES arg
)
885 struct task_struct
*prev
= arg
.Switch
.pPara
;
886 struct task_struct
*tsk
= current
;
887 struct pt_regs
*regs
= task_pt_regs(tsk
);
893 if (tsk
->flags
& PF_KTHREAD
) {
894 fn
= (void *)regs
->ctx
.DX
[4].U1
;
897 fn((void *)regs
->ctx
.DX
[3].U1
);
900 if (test_syscall_work())
901 syscall_trace_leave(regs
);
905 Next
.Sig
.TrigMask
= get_trigger_mask();
906 Next
.Sig
.SaveMask
= 0;
907 Next
.Sig
.pCtx
= ®s
->ctx
;
909 set_gateway_tls(current
->thread
.tls_ptr
);
911 preempt_enable_no_resched();
913 /* And interrupts should come back on when we resume the real usermode
914 * code. Call __TBIASyncResume()
916 __TBIASyncResume(tail_end(Next
));
917 /* ASyncResume should NEVER return */
/* show_trace() - print a kernel call trace, walking frame pointers when
 * CONFIG_FRAME_POINTER is set (validating each fp stays inside the task
 * stack) and otherwise scanning the raw stack for kernel text addresses.
 * NOTE(review): lossy scrape — declarations, loop bodies and braces are
 * missing from this view; surviving text preserved verbatim below. */
922 void show_trace(struct task_struct
*tsk
, unsigned long *sp
,
923 struct pt_regs
*regs
)
926 #ifdef CONFIG_FRAME_POINTER
927 unsigned long fp
, fpnew
;
931 if (regs
&& user_mode(regs
))
934 printk("\nCall trace: ");
935 #ifdef CONFIG_KALLSYMS
942 #ifdef CONFIG_FRAME_POINTER
944 print_ip_sym(regs
->ctx
.CurrPC
);
945 fp
= regs
->ctx
.AX
[1].U0
;
947 fp
= __core_reg_get(A0FrP
);
950 /* detect when the frame pointer has been used for other purposes and
951 * doesn't point to the stack (it may point completely elsewhere which
952 * kstack_end may not detect).
954 stack
= (unsigned long)task_stack_page(tsk
);
955 while (fp
>= stack
&& fp
+ 8 <= stack
+ THREAD_SIZE
) {
956 addr
= __raw_readl((unsigned long *)(fp
+ 4)) - 4;
957 if (kernel_text_address(addr
))
961 /* stack grows up, so frame pointers must decrease */
962 fpnew
= __raw_readl((unsigned long *)(fp
+ 0));
968 while (!kstack_end(sp
)) {
970 if (kernel_text_address(addr
))
977 debug_show_held_locks(tsk
);
/* show_stack() - print a stack trace for @tsk, deriving the starting
 * stack pointer from the current stack or the task's saved kernel
 * context, then delegating to show_trace().
 * NOTE(review): lossy scrape — the conditional selecting between the
 * two sp sources is missing from this view; surviving text preserved
 * verbatim below. */
980 void show_stack(struct task_struct
*tsk
, unsigned long *sp
)
985 sp
= (unsigned long *)current_stack_pointer
;
987 sp
= (unsigned long *)tsk
->thread
.kernel_context
->AX
[0].U0
;
989 show_trace(tsk
, sp
, NULL
);