/*
 * Meta exception handling.
 *
 * Copyright (C) 2005,2006,2007,2008,2009,2012 Imagination Technologies Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/unistd.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#include <asm/bug.h>
#include <asm/core_reg.h>
#include <asm/irqflags.h>
#include <asm/siginfo.h>
#include <asm/traps.h>
#include <asm/hwthread.h>
#include <asm/setup.h>
#include <asm/switch.h>
#include <asm/user_gateway.h>
#include <asm/syscall.h>
#include <asm/syscalls.h>

/* Passing syscall arguments as long long is quicker. */
typedef unsigned int (*LPSYSCALL) (unsigned long long,
				   unsigned long long,
				   unsigned long long);

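/*
 * Each long long carries a pair of 32-bit arguments: the syscall
 * dispatcher loads DX[3]..DX[1] pairwise, so a1_a2 holds arguments
 * 1 and 2, and so on (see switch1_handler() below).
 */
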
/*
 * Users of LNKSET should only compare the bus error bits obtained from
 * DEFR against TXDEFR_LNKSET_SUCCESS, as the failure code varies
 * between different core revisions.
 */
#define TXDEFR_LNKSET_SUCCESS 0x02000000
#define TXDEFR_LNKSET_FAILURE 0x04000000

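/*
 * For example (mirroring the check in head_end() below), callers
 * should test the success bit rather than TXDEFR_LNKSET_FAILURE:
 *
 *	txdefr = __core_reg_get(TXDEFR) & TXDEFR_BUS_STATE_BITS;
 *	if (txdefr & TXDEFR_LNKSET_SUCCESS)
 *		... the LNKSET succeeded ...
 */
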
/*
 * Our global TBI handle. Initialised from setup.c/setup_arch.
 */
DECLARE_PER_CPU(PTBI, pTBI);

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(unsigned int, trigger_mask);
#else
unsigned int global_trigger_mask;
EXPORT_SYMBOL(global_trigger_mask);
#endif

unsigned long per_cpu__stack_save[NR_CPUS];

static const char * const trap_names[] = {
	[TBIXXF_SIGNUM_IIF] = "Illegal instruction fault",
	[TBIXXF_SIGNUM_PGF] = "Privilege violation",
	[TBIXXF_SIGNUM_DHF] = "Unaligned data access fault",
	[TBIXXF_SIGNUM_IGF] = "Code fetch general read failure",
	[TBIXXF_SIGNUM_DGF] = "Data access general read/write fault",
	[TBIXXF_SIGNUM_IPF] = "Code fetch page fault",
	[TBIXXF_SIGNUM_DPF] = "Data access page fault",
	[TBIXXF_SIGNUM_IHF] = "Instruction breakpoint",
	[TBIXXF_SIGNUM_DWF] = "Read-only data access fault",
};

const char *trap_name(int trapno)
{
	if (trapno >= 0 && trapno < ARRAY_SIZE(trap_names)
			&& trap_names[trapno])
		return trap_names[trapno];
	return "Unknown fault";
}

static DEFINE_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs,
		    long err, unsigned long addr)
{
	static int die_counter;

	oops_enter();

	spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);
	pr_err("%s: err %04lx (%s) addr %08lx [#%d]\n", str, err & 0xffff,
	       trap_name(err & 0xffff), addr, ++die_counter);

	print_modules();
	show_regs(regs);

	pr_err("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
	       task_pid_nr(current), task_stack_page(current) + THREAD_SIZE);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	spin_unlock_irq(&die_lock);
	oops_exit();
	do_exit(SIGSEGV);
}

#ifdef CONFIG_METAG_DSP
/*
 * The ECH encoding specifies the size of a DSPRAM as,
 *
 *	"slots" / 4
 *
 * A "slot" is the size of two DSPRAM bank entries; an entry from
 * DSPRAM bank A and an entry from DSPRAM bank B. One DSPRAM bank
 * entry is 4 bytes.
 */
#define SLOT_SZ	8
static inline unsigned int decode_dspram_size(unsigned int size)
{
	unsigned int _sz = size & 0x7f;

	return _sz * SLOT_SZ * 4;
}
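/*
 * Worked example: an encoded size field of 0x02 describes 2 * 4 = 8
 * slots, i.e. 2 * SLOT_SZ * 4 = 64 bytes of DSPRAM bank entries.
 */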

static void dspram_save(struct meta_ext_context *dsp_ctx,
			unsigned int ramA_sz, unsigned int ramB_sz)
{
	unsigned int ram_sz[2];
	int i;

	ram_sz[0] = ramA_sz;
	ram_sz[1] = ramB_sz;

	for (i = 0; i < 2; i++) {
		if (ram_sz[i] != 0) {
			unsigned int sz;

			if (i == 0)
				sz = decode_dspram_size(ram_sz[i] >> 8);
			else
				sz = decode_dspram_size(ram_sz[i]);

			if (dsp_ctx->ram[i] == NULL) {
				dsp_ctx->ram[i] = kmalloc(sz, GFP_KERNEL);

				if (dsp_ctx->ram[i] == NULL)
					panic("couldn't save DSP context");
			} else {
				if (ram_sz[i] > dsp_ctx->ram_sz[i]) {
					kfree(dsp_ctx->ram[i]);

					dsp_ctx->ram[i] = kmalloc(sz,
								  GFP_KERNEL);

					if (dsp_ctx->ram[i] == NULL)
						panic("couldn't save DSP context");
				}
			}

			if (i == 0)
				__TBIDspramSaveA(ram_sz[i], dsp_ctx->ram[i]);
			else
				__TBIDspramSaveB(ram_sz[i], dsp_ctx->ram[i]);

			dsp_ctx->ram_sz[i] = ram_sz[i];
		}
	}
}
#endif /* CONFIG_METAG_DSP */

/*
 * Allow interrupts to be nested and save any "extended" register
 * context state, e.g. DSP regs and RAMs.
 */
static void nest_interrupts(TBIRES State, unsigned long mask)
{
#ifdef CONFIG_METAG_DSP
	struct meta_ext_context *dsp_ctx;
	unsigned int D0_8;

	/*
	 * D0.8 may contain an ECH encoding. The upper 16 bits
	 * tell us what DSP resources the current process is
	 * using. OR the bits into the SaveMask so that
	 * __TBINestInts() knows what resources to save as
	 * part of this context.
	 *
	 * Don't save the context if we're nesting interrupts in the
	 * kernel because the kernel doesn't use DSP hardware.
	 */
	D0_8 = __core_reg_get(D0.8);

	if (D0_8 && (State.Sig.SaveMask & TBICTX_PRIV_BIT)) {
		State.Sig.SaveMask |= (D0_8 >> 16);

		dsp_ctx = current->thread.dsp_context;
		if (dsp_ctx == NULL) {
			dsp_ctx = kzalloc(sizeof(*dsp_ctx), GFP_KERNEL);
			if (dsp_ctx == NULL)
				panic("couldn't save DSP context: ENOMEM");

			current->thread.dsp_context = dsp_ctx;
		}

		current->thread.user_flags |= (D0_8 & 0xffff0000);
		__TBINestInts(State, &dsp_ctx->regs, mask);
		dspram_save(dsp_ctx, D0_8 & 0x7f00, D0_8 & 0x007f);
	} else
		__TBINestInts(State, NULL, mask);
#else
	__TBINestInts(State, NULL, mask);
#endif
}

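/*
 * Common entry work for the exception and interrupt handlers below:
 * merge the interrupted context's save flags, preserve the TXDEFR bus
 * state on Meta 2.1 cores, and nest interrupts, saving any extended
 * (DSP) context in the process.
 */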
void head_end(TBIRES State, unsigned long mask)
{
	unsigned int savemask = (unsigned short)State.Sig.SaveMask;
	unsigned int ctx_savemask = (unsigned short)State.Sig.pCtx->SaveMask;

	if (savemask & TBICTX_PRIV_BIT) {
		ctx_savemask |= TBICTX_PRIV_BIT;
		current->thread.user_flags = savemask;
	}

	/* Always undo the sleep bit */
	ctx_savemask &= ~TBICTX_WAIT_BIT;

	/* Always save the catch buffer and RD pipe if they are dirty */
	savemask |= TBICTX_XCBF_BIT;

	/* Only save the catch and RD if we have not already done so.
	 * Note - the RD bits are in the pCtx only, and not in the
	 * State.SaveMask.
	 */
	if ((savemask & TBICTX_CBUF_BIT) ||
	    (ctx_savemask & TBICTX_CBRP_BIT)) {
		/* Have we already saved the buffers though?
		 * - See TestTrack 5071 */
		if (ctx_savemask & TBICTX_XCBF_BIT) {
			/* Strip off the bits so the call to __TBINestInts
			 * won't save the buffers again. */
			savemask &= ~TBICTX_CBUF_BIT;
			ctx_savemask &= ~TBICTX_CBRP_BIT;
		}
	}

#ifdef CONFIG_METAG_META21
	{
		unsigned int depth, txdefr;

		/*
		 * Save TXDEFR state.
		 *
		 * The process may have been interrupted after a LNKSET, but
		 * before it could read the DEFR state, so we mustn't lose
		 * that state or it could end up retrying an atomic operation
		 * that succeeded.
		 *
		 * All interrupts are disabled at this point so we
		 * don't need to perform any locking. We must do this
		 * dance before we use LNKGET or LNKSET.
		 */
		BUG_ON(current->thread.int_depth > HARDIRQ_BITS);

		depth = current->thread.int_depth++;

		txdefr = __core_reg_get(TXDEFR);

		txdefr &= TXDEFR_BUS_STATE_BITS;
		if (txdefr & TXDEFR_LNKSET_SUCCESS)
			current->thread.txdefr_failure &= ~(1 << depth);
		else
			current->thread.txdefr_failure |= (1 << depth);
	}
#endif

	State.Sig.SaveMask = savemask;
	State.Sig.pCtx->SaveMask = ctx_savemask;

	nest_interrupts(State, mask);

#ifdef CONFIG_METAG_POISON_CATCH_BUFFERS
	/* Poison the catch registers. This shows up any mistakes we have
	 * made in their handling MUCH quicker.
	 */
	__core_reg_set(TXCATCH0, 0x87650021);
	__core_reg_set(TXCATCH1, 0x87654322);
	__core_reg_set(TXCATCH2, 0x87654323);
	__core_reg_set(TXCATCH3, 0x87654324);
#endif /* CONFIG_METAG_POISON_CATCH_BUFFERS */
}

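/*
 * Common exit work when unwinding back towards the interrupted code:
 * run any pending work for userland, restore FPU and DSP context, and
 * restore the TXDEFR bus state saved by head_end() on Meta 2.1 cores.
 */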
TBIRES tail_end_sys(TBIRES State, int syscall, int *restart)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	unsigned long flags;

	local_irq_disable();

	if (user_mode(regs)) {
		flags = current_thread_info()->flags;
		if (flags & _TIF_WORK_MASK &&
		    do_work_pending(regs, flags, syscall)) {
			*restart = 1;
			return State;
		}

#ifdef CONFIG_METAG_FPU
		if (current->thread.fpu_context &&
		    current->thread.fpu_context->needs_restore) {
			__TBICtxFPURestore(State, current->thread.fpu_context);
			/*
			 * Clearing this bit ensures the FP unit is not made
			 * active again unless it is used.
			 */
			State.Sig.SaveMask &= ~TBICTX_FPAC_BIT;
			current->thread.fpu_context->needs_restore = false;
		}
		State.Sig.TrigMask |= TBI_TRIG_BIT(TBID_SIGNUM_DFR);
#endif
	}

	/* TBI will turn interrupts back on at some point. */
	if (!irqs_disabled_flags((unsigned long)State.Sig.TrigMask))
		trace_hardirqs_on();

#ifdef CONFIG_METAG_DSP
	/*
	 * If we previously saved an extended context then restore it
	 * now. Otherwise, clear D0.8 because this process is not
	 * using DSP hardware.
	 */
	if (State.Sig.pCtx->SaveMask & TBICTX_XEXT_BIT) {
		unsigned int D0_8;
		struct meta_ext_context *dsp_ctx = current->thread.dsp_context;

		/* Make sure we're going to return to userland. */
		BUG_ON(current->thread.int_depth != 1);

		if (dsp_ctx->ram_sz[0] > 0)
			__TBIDspramRestoreA(dsp_ctx->ram_sz[0],
					    dsp_ctx->ram[0]);
		if (dsp_ctx->ram_sz[1] > 0)
			__TBIDspramRestoreB(dsp_ctx->ram_sz[1],
					    dsp_ctx->ram[1]);

		State.Sig.SaveMask |= State.Sig.pCtx->SaveMask;
		__TBICtxRestore(State, current->thread.dsp_context);
		D0_8 = __core_reg_get(D0.8);
		D0_8 |= current->thread.user_flags & 0xffff0000;
		D0_8 |= (dsp_ctx->ram_sz[1] | dsp_ctx->ram_sz[0]) & 0xffff;
		__core_reg_set(D0.8, D0_8);
	} else
		__core_reg_set(D0.8, 0);
#endif /* CONFIG_METAG_DSP */

#ifdef CONFIG_METAG_META21
	{
		unsigned int depth, txdefr;

		/*
		 * If there hasn't been a LNKSET since the last LNKGET then
		 * the link flag will be set, causing the next LNKSET to
		 * succeed if the addresses match. The two LNK operations
		 * may not be a pair (e.g. see atomic_read()), so the LNKSET
		 * should fail. We use a conditional-never LNKSET to clear
		 * the link flag without side effects.
		 */
		asm volatile("LNKSETDNV [D0Re0],D0Re0");

		depth = --current->thread.int_depth;

		BUG_ON(user_mode(regs) && depth);

		txdefr = __core_reg_get(TXDEFR);

		txdefr &= ~TXDEFR_BUS_STATE_BITS;

		/* Do we need to restore a failure code into TXDEFR? */
		if (current->thread.txdefr_failure & (1 << depth))
			txdefr |= (TXDEFR_LNKSET_FAILURE | TXDEFR_BUS_TRIG_BIT);
		else
			txdefr |= (TXDEFR_LNKSET_SUCCESS | TXDEFR_BUS_TRIG_BIT);

		__core_reg_set(TXDEFR, txdefr);
	}
#endif
	return State;
}

#ifdef CONFIG_SMP
/*
 * If we took an interrupt in the middle of __kuser_get_tls then we need
 * to rewind the PC to the start of the function in case the process
 * gets migrated to another thread (SMP only) and reads the wrong TLS
 * data.
 */
static inline void _restart_critical_section(TBIRES State)
{
	unsigned long get_tls_start;
	unsigned long get_tls_end;

	get_tls_start = (unsigned long)__kuser_get_tls -
		(unsigned long)&__user_gateway_start;

	get_tls_start += USER_GATEWAY_PAGE;

	get_tls_end = (unsigned long)__kuser_get_tls_end -
		(unsigned long)&__user_gateway_start;

	get_tls_end += USER_GATEWAY_PAGE;

	if ((State.Sig.pCtx->CurrPC >= get_tls_start) &&
	    (State.Sig.pCtx->CurrPC < get_tls_end))
		State.Sig.pCtx->CurrPC = get_tls_start;
}
#else
/*
 * If we took an interrupt in the middle of __kuser_cmpxchg then we
 * need to rewind the PC to the start of the function.
 */
static inline void _restart_critical_section(TBIRES State)
{
	unsigned long cmpxchg_start;
	unsigned long cmpxchg_end;

	cmpxchg_start = (unsigned long)__kuser_cmpxchg -
		(unsigned long)&__user_gateway_start;

	cmpxchg_start += USER_GATEWAY_PAGE;

	cmpxchg_end = (unsigned long)__kuser_cmpxchg_end -
		(unsigned long)&__user_gateway_start;

	cmpxchg_end += USER_GATEWAY_PAGE;

	if ((State.Sig.pCtx->CurrPC >= cmpxchg_start) &&
	    (State.Sig.pCtx->CurrPC < cmpxchg_end))
		State.Sig.pCtx->CurrPC = cmpxchg_start;
}
#endif

/* Used by kick_handler() */
void restart_critical_section(TBIRES State)
{
	_restart_critical_section(State);
}

TBIRES trigger_handler(TBIRES State, int SigNum, int Triggers, int Inst,
		       PTBI pTBI)
{
	head_end(State, ~INTS_OFF_MASK);

	/* If we interrupted user code, handle any critical sections. */
	if (State.Sig.SaveMask & TBICTX_PRIV_BIT)
		_restart_critical_section(State);

	trace_hardirqs_off();

	do_IRQ(SigNum, (struct pt_regs *)State.Sig.pCtx);

	return tail_end(State);
}

static unsigned int load_fault(PTBICTXEXTCB0 pbuf)
{
	return pbuf->CBFlags & TXCATCH0_READ_BIT;
}

static unsigned long fault_address(PTBICTXEXTCB0 pbuf)
{
	return pbuf->CBAddr;
}

static void unhandled_fault(struct pt_regs *regs, unsigned long addr,
			    int signo, int code, int trapno)
{
	if (user_mode(regs)) {
		siginfo_t info;

		if (show_unhandled_signals && unhandled_signal(current, signo)
		    && printk_ratelimit()) {

			pr_info("pid %d unhandled fault: pc 0x%08x, addr 0x%08lx, trap %d (%s)\n",
				current->pid, regs->ctx.CurrPC, addr,
				trapno, trap_name(trapno));
			print_vma_addr(" in ", regs->ctx.CurrPC);
			print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
			printk("\n");
			show_regs(regs);
		}

		info.si_signo = signo;
		info.si_errno = 0;
		info.si_code = code;
		info.si_addr = (__force void __user *)addr;
		info.si_trapno = trapno;
		force_sig_info(signo, &info, current);
	} else {
		die("Oops", regs, trapno, addr);
	}
}

static int handle_data_fault(PTBICTXEXTCB0 pcbuf, struct pt_regs *regs,
			     unsigned int data_address, int trapno)
{
	int ret;

	ret = do_page_fault(regs, data_address, !load_fault(pcbuf), trapno);

	return ret;
}

static unsigned long get_inst_fault_address(struct pt_regs *regs)
{
	return regs->ctx.CurrPC;
}

TBIRES fault_handler(TBIRES State, int SigNum, int Triggers,
		     int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	PTBICTXEXTCB0 pcbuf = (PTBICTXEXTCB0)&regs->extcb0;
	unsigned long data_address;

	head_end(State, ~INTS_OFF_MASK);

	/* Hardware breakpoint or data watch */
	if ((SigNum == TBIXXF_SIGNUM_IHF) ||
	    ((SigNum == TBIXXF_SIGNUM_DHF) &&
	     (pcbuf[0].CBFlags & (TXCATCH0_WATCH1_BIT |
				  TXCATCH0_WATCH0_BIT)))) {
		State = __TBIUnExpXXX(State, SigNum, Triggers, Inst,
				      pTBI);
		return tail_end(State);
	}

	local_irq_enable();

	data_address = fault_address(pcbuf);

	switch (SigNum) {
	case TBIXXF_SIGNUM_IGF:
		/* 1st-level entry invalid (instruction fetch) */
	case TBIXXF_SIGNUM_IPF: {
		/* 2nd-level entry invalid (instruction fetch) */
		unsigned long addr = get_inst_fault_address(regs);
		do_page_fault(regs, addr, 0, SigNum);
		break;
	}

	case TBIXXF_SIGNUM_DGF:
		/* 1st-level entry invalid (data access) */
	case TBIXXF_SIGNUM_DPF:
		/* 2nd-level entry invalid (data access) */
	case TBIXXF_SIGNUM_DWF:
		/* Write to read only page */
		handle_data_fault(pcbuf, regs, data_address, SigNum);
		break;

	case TBIXXF_SIGNUM_IIF:
		/* Illegal instruction */
		unhandled_fault(regs, regs->ctx.CurrPC, SIGILL, ILL_ILLOPC,
				SigNum);
		break;

	case TBIXXF_SIGNUM_DHF:
		/* Unaligned access */
		unhandled_fault(regs, data_address, SIGBUS, BUS_ADRALN,
				SigNum);
		break;
	case TBIXXF_SIGNUM_PGF:
		/* Privilege violation */
		unhandled_fault(regs, data_address, SIGSEGV, SEGV_ACCERR,
				SigNum);
		break;
	default:
		BUG();
		break;
	}

	return tail_end(State);
}

static bool switch_is_syscall(unsigned int inst)
{
	return inst == __METAG_SW_ENCODING(SYS);
}

static bool switch_is_legacy_syscall(unsigned int inst)
{
	return inst == __METAG_SW_ENCODING(SYS_LEGACY);
}

static inline void step_over_switch(struct pt_regs *regs, unsigned int inst)
{
	regs->ctx.CurrPC += 4;
}

static inline int test_syscall_work(void)
{
	return current_thread_info()->flags & _TIF_WORK_SYSCALL_MASK;
}

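/*
 * System call entry: userland executes a SWITCH SYS instruction, which
 * raises the SW1 trigger. The syscall number arrives in DX[0].U1, the
 * six 32-bit arguments are read pairwise from DX[3]..DX[1], and the
 * return value is written back to DX[0].U0.
 */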
TBIRES switch1_handler(TBIRES State, int SigNum, int Triggers,
		       int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	unsigned int sysnumber;
	unsigned long long a1_a2, a3_a4, a5_a6;
	LPSYSCALL syscall_entry;
	int restart;

	head_end(State, ~INTS_OFF_MASK);

	/*
	 * If this is not a syscall SWITCH it could be a breakpoint.
	 */
	if (!switch_is_syscall(Inst)) {
		/*
		 * Alert the user if they're trying to use legacy system
		 * calls. This suggests they need to update their C
		 * library and build against up to date kernel headers.
		 */
		if (switch_is_legacy_syscall(Inst))
			pr_warn_once("WARNING: A legacy syscall was made. Your userland needs updating.\n");
		/*
		 * We don't know how to handle the SWITCH and cannot
		 * safely ignore it, so treat all unknown switches
		 * (including breakpoints) as traps.
		 */
		force_sig(SIGTRAP, current);
		return tail_end(State);
	}

	local_irq_enable();

restart_syscall:
	restart = 0;
	sysnumber = regs->ctx.DX[0].U1;

	if (test_syscall_work())
		sysnumber = syscall_trace_enter(regs);

	/* Skip over the SWITCH instruction - or you just get 'stuck' on it! */
	step_over_switch(regs, Inst);

	if (sysnumber >= __NR_syscalls) {
		pr_debug("unknown syscall number: %d\n", sysnumber);
		syscall_entry = (LPSYSCALL) sys_ni_syscall;
	} else {
		syscall_entry = (LPSYSCALL) sys_call_table[sysnumber];
	}

	/* Use 64bit loads for speed. */
	a5_a6 = *(unsigned long long *)&regs->ctx.DX[1];
	a3_a4 = *(unsigned long long *)&regs->ctx.DX[2];
	a1_a2 = *(unsigned long long *)&regs->ctx.DX[3];

	/* Here is the actual call to the syscall handler function. */
	regs->ctx.DX[0].U0 = syscall_entry(a1_a2, a3_a4, a5_a6);

	if (test_syscall_work())
		syscall_trace_leave(regs);

	State = tail_end_sys(State, sysnumber, &restart);
	/* Handlerless restarts shouldn't go via userland */
	if (restart)
		goto restart_syscall;
	return State;
}

TBIRES switchx_handler(TBIRES State, int SigNum, int Triggers,
		       int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;

	/*
	 * This can be caused by any user process simply executing an unusual
	 * SWITCH instruction. If there's no DA, __TBIUnExpXXX will cause the
	 * thread to stop, so signal a SIGTRAP instead.
	 */
	head_end(State, ~INTS_OFF_MASK);
	if (user_mode(regs))
		force_sig(SIGTRAP, current);
	else
		State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, pTBI);
	return tail_end(State);
}

#ifdef CONFIG_METAG_META21
TBIRES fpe_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	unsigned int error_state = Triggers;
	siginfo_t info;

	head_end(State, ~INTS_OFF_MASK);

	local_irq_enable();

	info.si_signo = SIGFPE;

	if (error_state & TXSTAT_FPE_INVALID_BIT)
		info.si_code = FPE_FLTINV;
	else if (error_state & TXSTAT_FPE_DIVBYZERO_BIT)
		info.si_code = FPE_FLTDIV;
	else if (error_state & TXSTAT_FPE_OVERFLOW_BIT)
		info.si_code = FPE_FLTOVF;
	else if (error_state & TXSTAT_FPE_UNDERFLOW_BIT)
		info.si_code = FPE_FLTUND;
	else if (error_state & TXSTAT_FPE_INEXACT_BIT)
		info.si_code = FPE_FLTRES;
	else
		info.si_code = 0;
	info.si_errno = 0;
	info.si_addr = (__force void __user *)regs->ctx.CurrPC;
	force_sig_info(SIGFPE, &info, current);

	return tail_end(State);
}
#endif

#ifdef CONFIG_METAG_SUSPEND_MEM
struct traps_context {
	PTBIAPIFN fnSigs[TBID_SIGNUM_MAX + 1];
};

static struct traps_context *metag_traps_context;

int traps_save_context(void)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);
	struct traps_context *context;

	context = kzalloc(sizeof(*context), GFP_ATOMIC);
	if (!context)
		return -ENOMEM;

	memcpy(context->fnSigs, (void *)_pTBI->fnSigs, sizeof(context->fnSigs));

	metag_traps_context = context;
	return 0;
}

int traps_restore_context(void)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);
	struct traps_context *context = metag_traps_context;

	metag_traps_context = NULL;

	memcpy((void *)_pTBI->fnSigs, context->fnSigs, sizeof(context->fnSigs));

	kfree(context);
	return 0;
}
#endif

#ifdef CONFIG_SMP
static inline unsigned int _get_trigger_mask(void)
{
	unsigned long cpu = smp_processor_id();
	return per_cpu(trigger_mask, cpu);
}

unsigned int get_trigger_mask(void)
{
	return _get_trigger_mask();
}
EXPORT_SYMBOL(get_trigger_mask);

static void set_trigger_mask(unsigned int mask)
{
	unsigned long cpu = smp_processor_id();
	per_cpu(trigger_mask, cpu) = mask;
}

void arch_local_irq_enable(void)
{
	preempt_disable();
	arch_local_irq_restore(_get_trigger_mask());
	preempt_enable_no_resched();
}
EXPORT_SYMBOL(arch_local_irq_enable);
#else
static void set_trigger_mask(unsigned int mask)
{
	global_trigger_mask = mask;
}
#endif

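/*
 * Program this CPU's initial trigger mask (interrupts plus the low
 * level kick and SW1 syscall triggers) and hand the current stack over
 * to TBI with interrupts initially masked.
 */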
void per_cpu_trap_init(unsigned long cpu)
{
	TBIRES int_context;
	unsigned int thread = cpu_2_hwthread_id[cpu];

	set_trigger_mask(TBI_INTS_INIT(thread) | /* interrupts */
			 TBI_TRIG_BIT(TBID_SIGNUM_LWK) | /* low level kick */
			 TBI_TRIG_BIT(TBID_SIGNUM_SW1));

	/* non-priv - use current stack */
	int_context.Sig.pCtx = NULL;
	/* Start with interrupts off */
	int_context.Sig.TrigMask = INTS_OFF_MASK;
	int_context.Sig.SaveMask = 0;

	/* And call __TBIASyncTrigger() */
	__TBIASyncTrigger(int_context);
}

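/*
 * Install the TBI signal handlers for this CPU: faults, the syscall
 * and breakpoint SWITCHes, kicks, and (on Meta 2.1) deferred and FP
 * exceptions.
 */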
void __init trap_init(void)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	_pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_LWK] = kick_handler;

#ifdef CONFIG_METAG_META21
	_pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR;
	_pTBI->fnSigs[TBID_SIGNUM_FPE] = fpe_handler;
#endif

	per_cpu_trap_init(cpu);
}

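/*
 * Route a TBI signal number to the generic IRQ layer: unmask its
 * trigger bit and install trigger_handler() as its TBI handler.
 */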
void tbi_startup_interrupt(int irq)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	BUG_ON(irq > TBID_SIGNUM_MAX);

	/* For TR1 and TR2, the thread id is encoded in the irq number */
	if (irq >= TBID_SIGNUM_T10 && irq < TBID_SIGNUM_TR3)
		cpu = hwthread_id_2_cpu[(irq - TBID_SIGNUM_T10) % 4];

	set_trigger_mask(get_trigger_mask() | TBI_TRIG_BIT(irq));

	_pTBI->fnSigs[irq] = trigger_handler;
}

void tbi_shutdown_interrupt(int irq)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	BUG_ON(irq > TBID_SIGNUM_MAX);

	set_trigger_mask(get_trigger_mask() & ~TBI_TRIG_BIT(irq));

	_pTBI->fnSigs[irq] = __TBIUnExpXXX;
}

int ret_from_fork(TBIRES arg)
{
	struct task_struct *prev = arg.Switch.pPara;
	struct task_struct *tsk = current;
	struct pt_regs *regs = task_pt_regs(tsk);
	int (*fn)(void *);
	TBIRES Next;

	schedule_tail(prev);

	if (tsk->flags & PF_KTHREAD) {
		fn = (void *)regs->ctx.DX[4].U1;
		BUG_ON(!fn);

		fn((void *)regs->ctx.DX[3].U1);
	}

	if (test_syscall_work())
		syscall_trace_leave(regs);

	preempt_disable();

	Next.Sig.TrigMask = get_trigger_mask();
	Next.Sig.SaveMask = 0;
	Next.Sig.pCtx = &regs->ctx;

	set_gateway_tls(current->thread.tls_ptr);

	preempt_enable_no_resched();

	/* And interrupts should come back on when we resume the real usermode
	 * code. Call __TBIASyncResume()
	 */
	__TBIASyncResume(tail_end(Next));
	/* ASyncResume should NEVER return */
	BUG();
	return 0;
}

void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs)
{
	unsigned long addr;
#ifdef CONFIG_FRAME_POINTER
	unsigned long fp, fpnew;
	unsigned long stack;
#endif

	if (regs && user_mode(regs))
		return;

	printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif

	if (!tsk)
		tsk = current;

#ifdef CONFIG_FRAME_POINTER
	if (regs) {
		print_ip_sym(regs->ctx.CurrPC);
		fp = regs->ctx.AX[1].U0;
	} else {
		fp = __core_reg_get(A0FrP);
	}

	/* Detect when the frame pointer has been used for other purposes and
	 * doesn't point to the stack (it may point completely elsewhere,
	 * which kstack_end may not detect).
	 */
	stack = (unsigned long)task_stack_page(tsk);
	while (fp >= stack && fp + 8 <= stack + THREAD_SIZE) {
		addr = __raw_readl((unsigned long *)(fp + 4)) - 4;
		if (kernel_text_address(addr))
			print_ip_sym(addr);
		else
			break;
		/* The stack grows up, so frame pointers must decrease. */
		fpnew = __raw_readl((unsigned long *)(fp + 0));
		if (fpnew >= fp)
			break;
		fp = fpnew;
	}
#else
	while (!kstack_end(sp)) {
		addr = (*sp--) - 4;
		if (kernel_text_address(addr))
			print_ip_sym(addr);
	}
#endif

	printk("\n");

	debug_show_held_locks(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	if (!tsk)
		tsk = current;
	if (tsk == current)
		sp = (unsigned long *)current_stack_pointer;
	else
		sp = (unsigned long *)tsk->thread.kernel_context->AX[0].U0;

	show_trace(tsk, sp, NULL);
}