arch/metag/kernel/traps.c
/*
 * Meta exception handling.
 *
 * Copyright (C) 2005,2006,2007,2008,2009,2012 Imagination Technologies Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/unistd.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#include <asm/bug.h>
#include <asm/core_reg.h>
#include <asm/irqflags.h>
#include <asm/siginfo.h>
#include <asm/traps.h>
#include <asm/hwthread.h>
#include <asm/setup.h>
#include <asm/switch.h>
#include <asm/user_gateway.h>
#include <asm/syscall.h>
#include <asm/syscalls.h>

/* Passing syscall arguments as long long is quicker. */
typedef unsigned int (*LPSYSCALL) (unsigned long long,
				   unsigned long long,
				   unsigned long long);
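
/*
 * Each long long carries two adjacent 32-bit syscall arguments, so all
 * six arguments reach the handler via three 64-bit loads from the D
 * register pairs in pt_regs (see switch1_handler() below).
 */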

/*
 * Users of LNKSET should compare the bus error bits obtained from DEFR
 * against TXDEFR_LNKSET_SUCCESS only, as the failure code will vary
 * between different core revisions.
 */
#define TXDEFR_LNKSET_SUCCESS	0x02000000
#define TXDEFR_LNKSET_FAILURE	0x04000000
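
/*
 * For example, head_end() below records per-depth LNKSET status by
 * testing the success bit, never the failure bits:
 *
 *	if (txdefr & TXDEFR_LNKSET_SUCCESS)
 *		current->thread.txdefr_failure &= ~(1 << depth);
 *	else
 *		current->thread.txdefr_failure |= (1 << depth);
 */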

/*
 * Our global TBI handle. Initialised from setup.c/setup_arch.
 */
DECLARE_PER_CPU(PTBI, pTBI);

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(unsigned int, trigger_mask);
#else
unsigned int global_trigger_mask;
EXPORT_SYMBOL(global_trigger_mask);
#endif

unsigned long per_cpu__stack_save[NR_CPUS];

static const char * const trap_names[] = {
	[TBIXXF_SIGNUM_IIF] = "Illegal instruction fault",
	[TBIXXF_SIGNUM_PGF] = "Privilege violation",
	[TBIXXF_SIGNUM_DHF] = "Unaligned data access fault",
	[TBIXXF_SIGNUM_IGF] = "Code fetch general read failure",
	[TBIXXF_SIGNUM_DGF] = "Data access general read/write fault",
	[TBIXXF_SIGNUM_IPF] = "Code fetch page fault",
	[TBIXXF_SIGNUM_DPF] = "Data access page fault",
	[TBIXXF_SIGNUM_IHF] = "Instruction breakpoint",
	[TBIXXF_SIGNUM_DWF] = "Read-only data access fault",
};

const char *trap_name(int trapno)
{
	if (trapno >= 0 && trapno < ARRAY_SIZE(trap_names)
			&& trap_names[trapno])
		return trap_names[trapno];
	return "Unknown fault";
}
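
/*
 * e.g. trap_name(TBIXXF_SIGNUM_DPF) yields "Data access page fault",
 * while an out-of-range or unlisted trap number yields "Unknown fault".
 */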

static DEFINE_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs,
		    long err, unsigned long addr)
{
	static int die_counter;

	oops_enter();

	spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);
	pr_err("%s: err %04lx (%s) addr %08lx [#%d]\n", str, err & 0xffff,
	       trap_name(err & 0xffff), addr, ++die_counter);

	print_modules();
	show_regs(regs);

	pr_err("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
	       task_pid_nr(current), task_stack_page(current) + THREAD_SIZE);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	spin_unlock_irq(&die_lock);
	oops_exit();
	do_exit(SIGSEGV);
}

#ifdef CONFIG_METAG_DSP
/*
 * The ECH encoding specifies the size of a DSPRAM as,
 *
 *	"slots" / 4
 *
 * A "slot" is the size of two DSPRAM bank entries; an entry from
 * DSPRAM bank A and an entry from DSPRAM bank B. One DSPRAM bank
 * entry is 4 bytes.
 */
#define SLOT_SZ	8
static inline unsigned int decode_dspram_size(unsigned int size)
{
	unsigned int _sz = size & 0x7f;

	return _sz * SLOT_SZ * 4;
}
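
/*
 * e.g. an encoded size field of 0x02 means 2 * 4 = 8 slots; with each
 * slot being SLOT_SZ (8) bytes, decode_dspram_size(0x02) returns
 * 2 * 8 * 4 = 64 bytes.
 */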

static void dspram_save(struct meta_ext_context *dsp_ctx,
			unsigned int ramA_sz, unsigned int ramB_sz)
{
	unsigned int ram_sz[2];
	int i;

	ram_sz[0] = ramA_sz;
	ram_sz[1] = ramB_sz;

	for (i = 0; i < 2; i++) {
		if (ram_sz[i] != 0) {
			unsigned int sz;

			if (i == 0)
				sz = decode_dspram_size(ram_sz[i] >> 8);
			else
				sz = decode_dspram_size(ram_sz[i]);

			if (dsp_ctx->ram[i] == NULL) {
				dsp_ctx->ram[i] = kmalloc(sz, GFP_KERNEL);

				if (dsp_ctx->ram[i] == NULL)
					panic("couldn't save DSP context");
			} else {
				if (ram_sz[i] > dsp_ctx->ram_sz[i]) {
					kfree(dsp_ctx->ram[i]);

					dsp_ctx->ram[i] = kmalloc(sz,
								  GFP_KERNEL);

					if (dsp_ctx->ram[i] == NULL)
						panic("couldn't save DSP context");
				}
			}

			if (i == 0)
				__TBIDspramSaveA(ram_sz[i], dsp_ctx->ram[i]);
			else
				__TBIDspramSaveB(ram_sz[i], dsp_ctx->ram[i]);

			dsp_ctx->ram_sz[i] = ram_sz[i];
		}
	}
}
#endif /* CONFIG_METAG_DSP */

/*
 * Allow interrupts to be nested and save any "extended" register
 * context state, e.g. DSP regs and RAMs.
 */
static void nest_interrupts(TBIRES State, unsigned long mask)
{
#ifdef CONFIG_METAG_DSP
	struct meta_ext_context *dsp_ctx;
	unsigned int D0_8;

	/*
	 * D0.8 may contain an ECH encoding. The upper 16 bits
	 * tell us what DSP resources the current process is
	 * using. OR the bits into the SaveMask so that
	 * __TBINestInts() knows what resources to save as
	 * part of this context.
	 *
	 * Don't save the context if we're nesting interrupts in the
	 * kernel because the kernel doesn't use DSP hardware.
	 */
	D0_8 = __core_reg_get(D0.8);

	if (D0_8 && (State.Sig.SaveMask & TBICTX_PRIV_BIT)) {
		State.Sig.SaveMask |= (D0_8 >> 16);

		dsp_ctx = current->thread.dsp_context;
		if (dsp_ctx == NULL) {
			dsp_ctx = kzalloc(sizeof(*dsp_ctx), GFP_KERNEL);
			if (dsp_ctx == NULL)
				panic("couldn't save DSP context: ENOMEM");

			current->thread.dsp_context = dsp_ctx;
		}

		current->thread.user_flags |= (D0_8 & 0xffff0000);
		__TBINestInts(State, &dsp_ctx->regs, mask);
		dspram_save(dsp_ctx, D0_8 & 0x7f00, D0_8 & 0x007f);
	} else
		__TBINestInts(State, NULL, mask);
#else
	__TBINestInts(State, NULL, mask);
#endif
}

void head_end(TBIRES State, unsigned long mask)
{
	unsigned int savemask = (unsigned short)State.Sig.SaveMask;
	unsigned int ctx_savemask = (unsigned short)State.Sig.pCtx->SaveMask;

	if (savemask & TBICTX_PRIV_BIT) {
		ctx_savemask |= TBICTX_PRIV_BIT;
		current->thread.user_flags = savemask;
	}

	/* Always undo the sleep bit */
	ctx_savemask &= ~TBICTX_WAIT_BIT;

	/* Always save the catch buffer and RD pipe if they are dirty */
	savemask |= TBICTX_XCBF_BIT;

	/* Only save the catch and RD if we have not already done so.
	 * Note - the RD bits are in the pCtx only, and not in the
	 * State.SaveMask.
	 */
	if ((savemask & TBICTX_CBUF_BIT) ||
	    (ctx_savemask & TBICTX_CBRP_BIT)) {
		/* Have we already saved the buffers though?
		 * - See TestTrack 5071 */
		if (ctx_savemask & TBICTX_XCBF_BIT) {
			/* Strip off the bits so the call to __TBINestInts
			 * won't save the buffers again. */
			savemask &= ~TBICTX_CBUF_BIT;
			ctx_savemask &= ~TBICTX_CBRP_BIT;
		}
	}

#ifdef CONFIG_METAG_META21
	{
		unsigned int depth, txdefr;

		/*
		 * Save TXDEFR state.
		 *
		 * The process may have been interrupted after a LNKSET, but
		 * before it could read the DEFR state, so we mustn't lose that
		 * state or it could end up retrying an atomic operation that
		 * succeeded.
		 *
		 * All interrupts are disabled at this point so we
		 * don't need to perform any locking. We must do this
		 * dance before we use LNKGET or LNKSET.
		 */
		BUG_ON(current->thread.int_depth > HARDIRQ_BITS);

		depth = current->thread.int_depth++;

		txdefr = __core_reg_get(TXDEFR);

		txdefr &= TXDEFR_BUS_STATE_BITS;
		if (txdefr & TXDEFR_LNKSET_SUCCESS)
			current->thread.txdefr_failure &= ~(1 << depth);
		else
			current->thread.txdefr_failure |= (1 << depth);
	}
#endif

	State.Sig.SaveMask = savemask;
	State.Sig.pCtx->SaveMask = ctx_savemask;

	nest_interrupts(State, mask);

#ifdef CONFIG_METAG_POISON_CATCH_BUFFERS
	/* Poison the catch registers. This shows up any mistakes we have
	 * made in their handling MUCH quicker.
	 */
	__core_reg_set(TXCATCH0, 0x87650021);
	__core_reg_set(TXCATCH1, 0x87654322);
	__core_reg_set(TXCATCH2, 0x87654323);
	__core_reg_set(TXCATCH3, 0x87654324);
#endif /* CONFIG_METAG_POISON_CATCH_BUFFERS */
}

TBIRES tail_end_sys(TBIRES State, int syscall, int *restart)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	unsigned long flags;

	local_irq_disable();

	if (user_mode(regs)) {
		flags = current_thread_info()->flags;
		if (flags & _TIF_WORK_MASK &&
		    do_work_pending(regs, flags, syscall)) {
			*restart = 1;
			return State;
		}

#ifdef CONFIG_METAG_FPU
		if (current->thread.fpu_context &&
		    current->thread.fpu_context->needs_restore) {
			__TBICtxFPURestore(State, current->thread.fpu_context);
			/*
			 * Clearing this bit ensures the FP unit is not made
			 * active again unless it is used.
			 */
			State.Sig.SaveMask &= ~TBICTX_FPAC_BIT;
			current->thread.fpu_context->needs_restore = false;
		}
		State.Sig.TrigMask |= TBI_TRIG_BIT(TBID_SIGNUM_DFR);
#endif
	}

	/* TBI will turn interrupts back on at some point. */
	if (!irqs_disabled_flags((unsigned long)State.Sig.TrigMask))
		trace_hardirqs_on();

#ifdef CONFIG_METAG_DSP
	/*
	 * If we previously saved an extended context then restore it
	 * now. Otherwise, clear D0.8 because this process is not
	 * using DSP hardware.
	 */
	if (State.Sig.pCtx->SaveMask & TBICTX_XEXT_BIT) {
		unsigned int D0_8;
		struct meta_ext_context *dsp_ctx = current->thread.dsp_context;

		/* Make sure we're going to return to userland. */
		BUG_ON(current->thread.int_depth != 1);

		if (dsp_ctx->ram_sz[0] > 0)
			__TBIDspramRestoreA(dsp_ctx->ram_sz[0],
					    dsp_ctx->ram[0]);
		if (dsp_ctx->ram_sz[1] > 0)
			__TBIDspramRestoreB(dsp_ctx->ram_sz[1],
					    dsp_ctx->ram[1]);

		State.Sig.SaveMask |= State.Sig.pCtx->SaveMask;
		__TBICtxRestore(State, current->thread.dsp_context);
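		/*
		 * Reassemble D0.8: the top 16 bits carry the DSP resource
		 * flags saved in user_flags, the bottom 16 bits the encoded
		 * DSPRAM bank sizes.
		 */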
		D0_8 = __core_reg_get(D0.8);
		D0_8 |= current->thread.user_flags & 0xffff0000;
		D0_8 |= (dsp_ctx->ram_sz[1] | dsp_ctx->ram_sz[0]) & 0xffff;
		__core_reg_set(D0.8, D0_8);
	} else
		__core_reg_set(D0.8, 0);
#endif /* CONFIG_METAG_DSP */

#ifdef CONFIG_METAG_META21
	{
		unsigned int depth, txdefr;

		/*
		 * If there hasn't been a LNKSET since the last LNKGET then the
		 * link flag will be set, causing the next LNKSET to succeed if
		 * the addresses match. The two LNK operations may not be a pair
		 * (e.g. see atomic_read()), so the LNKSET should fail.
		 * We use a conditional-never LNKSET to clear the link flag
		 * without side effects.
		 */
		asm volatile("LNKSETDNV [D0Re0],D0Re0");

		depth = --current->thread.int_depth;

		BUG_ON(user_mode(regs) && depth);

		txdefr = __core_reg_get(TXDEFR);

		txdefr &= ~TXDEFR_BUS_STATE_BITS;

		/* Do we need to restore a failure code into TXDEFR? */
		if (current->thread.txdefr_failure & (1 << depth))
			txdefr |= (TXDEFR_LNKSET_FAILURE | TXDEFR_BUS_TRIG_BIT);
		else
			txdefr |= (TXDEFR_LNKSET_SUCCESS | TXDEFR_BUS_TRIG_BIT);

		__core_reg_set(TXDEFR, txdefr);
	}
#endif
	return State;
}

#ifdef CONFIG_SMP
/*
 * If we took an interrupt in the middle of __kuser_get_tls then we need
 * to rewind the PC to the start of the function in case the process
 * gets migrated to another thread (SMP only) and reads the wrong TLS
 * data.
 */
static inline void _restart_critical_section(TBIRES State)
{
	unsigned long get_tls_start;
	unsigned long get_tls_end;

	get_tls_start = (unsigned long)__kuser_get_tls -
		(unsigned long)&__user_gateway_start;

	get_tls_start += USER_GATEWAY_PAGE;

	get_tls_end = (unsigned long)__kuser_get_tls_end -
		(unsigned long)&__user_gateway_start;

	get_tls_end += USER_GATEWAY_PAGE;

	if ((State.Sig.pCtx->CurrPC >= get_tls_start) &&
	    (State.Sig.pCtx->CurrPC < get_tls_end))
		State.Sig.pCtx->CurrPC = get_tls_start;
}
#else
/*
 * If we took an interrupt in the middle of
 * __kuser_cmpxchg then we need to rewind the PC to the
 * start of the function.
 */
static inline void _restart_critical_section(TBIRES State)
{
	unsigned long cmpxchg_start;
	unsigned long cmpxchg_end;

	cmpxchg_start = (unsigned long)__kuser_cmpxchg -
		(unsigned long)&__user_gateway_start;

	cmpxchg_start += USER_GATEWAY_PAGE;

	cmpxchg_end = (unsigned long)__kuser_cmpxchg_end -
		(unsigned long)&__user_gateway_start;

	cmpxchg_end += USER_GATEWAY_PAGE;

	if ((State.Sig.pCtx->CurrPC >= cmpxchg_start) &&
	    (State.Sig.pCtx->CurrPC < cmpxchg_end))
		State.Sig.pCtx->CurrPC = cmpxchg_start;
}
#endif

/* Used by kick_handler() */
void restart_critical_section(TBIRES State)
{
	_restart_critical_section(State);
}

TBIRES trigger_handler(TBIRES State, int SigNum, int Triggers, int Inst,
		       PTBI pTBI)
{
	head_end(State, ~INTS_OFF_MASK);

	/* If we interrupted user code, handle any critical sections. */
	if (State.Sig.SaveMask & TBICTX_PRIV_BIT)
		_restart_critical_section(State);

	trace_hardirqs_off();

	do_IRQ(SigNum, (struct pt_regs *)State.Sig.pCtx);

	return tail_end(State);
}

static unsigned int load_fault(PTBICTXEXTCB0 pbuf)
{
	return pbuf->CBFlags & TXCATCH0_READ_BIT;
}

static unsigned long fault_address(PTBICTXEXTCB0 pbuf)
{
	return pbuf->CBAddr;
}

static void unhandled_fault(struct pt_regs *regs, unsigned long addr,
			    int signo, int code, int trapno)
{
	if (user_mode(regs)) {
		siginfo_t info;

		if (show_unhandled_signals && unhandled_signal(current, signo)
		    && printk_ratelimit()) {

			pr_info("pid %d unhandled fault: pc 0x%08x, addr 0x%08lx, trap %d (%s)\n",
				current->pid, regs->ctx.CurrPC, addr,
				trapno, trap_name(trapno));
			print_vma_addr(" in ", regs->ctx.CurrPC);
			print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
			printk("\n");
			show_regs(regs);
		}

		info.si_signo = signo;
		info.si_errno = 0;
		info.si_code = code;
		info.si_addr = (__force void __user *)addr;
		info.si_trapno = trapno;
		force_sig_info(signo, &info, current);
	} else {
		die("Oops", regs, trapno, addr);
	}
}

static int handle_data_fault(PTBICTXEXTCB0 pcbuf, struct pt_regs *regs,
			     unsigned int data_address, int trapno)
{
	int ret;

	ret = do_page_fault(regs, data_address, !load_fault(pcbuf), trapno);

	return ret;
}

static unsigned long get_inst_fault_address(struct pt_regs *regs)
{
	return regs->ctx.CurrPC;
}

TBIRES fault_handler(TBIRES State, int SigNum, int Triggers,
		     int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	PTBICTXEXTCB0 pcbuf = (PTBICTXEXTCB0)&regs->extcb0;
	unsigned long data_address;

	head_end(State, ~INTS_OFF_MASK);

	/* Hardware breakpoint or data watch */
	if ((SigNum == TBIXXF_SIGNUM_IHF) ||
	    ((SigNum == TBIXXF_SIGNUM_DHF) &&
	     (pcbuf[0].CBFlags & (TXCATCH0_WATCH1_BIT |
				  TXCATCH0_WATCH0_BIT)))) {
		State = __TBIUnExpXXX(State, SigNum, Triggers, Inst,
				      pTBI);
		return tail_end(State);
	}

	local_irq_enable();

	data_address = fault_address(pcbuf);

	switch (SigNum) {
	case TBIXXF_SIGNUM_IGF:
		/* 1st-level entry invalid (instruction fetch) */
	case TBIXXF_SIGNUM_IPF: {
		/* 2nd-level entry invalid (instruction fetch) */
		unsigned long addr = get_inst_fault_address(regs);
		do_page_fault(regs, addr, 0, SigNum);
		break;
	}

	case TBIXXF_SIGNUM_DGF:
		/* 1st-level entry invalid (data access) */
	case TBIXXF_SIGNUM_DPF:
		/* 2nd-level entry invalid (data access) */
	case TBIXXF_SIGNUM_DWF:
		/* Write to read only page */
		handle_data_fault(pcbuf, regs, data_address, SigNum);
		break;

	case TBIXXF_SIGNUM_IIF:
		/* Illegal instruction */
		unhandled_fault(regs, regs->ctx.CurrPC, SIGILL, ILL_ILLOPC,
				SigNum);
		break;

	case TBIXXF_SIGNUM_DHF:
		/* Unaligned access */
		unhandled_fault(regs, data_address, SIGBUS, BUS_ADRALN,
				SigNum);
		break;
	case TBIXXF_SIGNUM_PGF:
		/* Privilege violation */
		unhandled_fault(regs, data_address, SIGSEGV, SEGV_ACCERR,
				SigNum);
		break;
	default:
		BUG();
		break;
	}

	return tail_end(State);
}

static bool switch_is_syscall(unsigned int inst)
{
	return inst == __METAG_SW_ENCODING(SYS);
}

static bool switch_is_legacy_syscall(unsigned int inst)
{
	return inst == __METAG_SW_ENCODING(SYS_LEGACY);
}

static inline void step_over_switch(struct pt_regs *regs, unsigned int inst)
{
	regs->ctx.CurrPC += 4;
}

static inline int test_syscall_work(void)
{
	return current_thread_info()->flags & _TIF_WORK_SYSCALL_MASK;
}

TBIRES switch1_handler(TBIRES State, int SigNum, int Triggers,
		       int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	unsigned int sysnumber;
	unsigned long long a1_a2, a3_a4, a5_a6;
	LPSYSCALL syscall_entry;
	int restart;

	head_end(State, ~INTS_OFF_MASK);

	/*
	 * If this is not a syscall SWITCH, it could be a breakpoint.
	 */
	if (!switch_is_syscall(Inst)) {
		/*
		 * Alert the user if they're trying to use legacy system
		 * calls. This suggests they need to update their C
		 * library and build against up to date kernel headers.
		 */
		if (switch_is_legacy_syscall(Inst))
			pr_warn_once("WARNING: A legacy syscall was made. Your userland needs updating.\n");
		/*
		 * We don't know how to handle the SWITCH and cannot
		 * safely ignore it, so treat all unknown switches
		 * (including breakpoints) as traps.
		 */
		force_sig(SIGTRAP, current);
		return tail_end(State);
	}

	local_irq_enable();

restart_syscall:
	restart = 0;
	sysnumber = regs->ctx.DX[0].U1;

	if (test_syscall_work())
		sysnumber = syscall_trace_enter(regs);

	/* Skip over the SWITCH instruction - or you just get 'stuck' on it! */
	step_over_switch(regs, Inst);

	if (sysnumber >= __NR_syscalls) {
		pr_debug("unknown syscall number: %d\n", sysnumber);
		syscall_entry = (LPSYSCALL) sys_ni_syscall;
	} else {
		syscall_entry = (LPSYSCALL) sys_call_table[sysnumber];
	}

	/* Use 64bit loads for speed. */
	a5_a6 = *(unsigned long long *)&regs->ctx.DX[1];
	a3_a4 = *(unsigned long long *)&regs->ctx.DX[2];
	a1_a2 = *(unsigned long long *)&regs->ctx.DX[3];

	/* Here is the actual call to the syscall handler function. */
	regs->ctx.DX[0].U0 = syscall_entry(a1_a2, a3_a4, a5_a6);

	if (test_syscall_work())
		syscall_trace_leave(regs);

	State = tail_end_sys(State, sysnumber, &restart);
	/* Handlerless restarts shouldn't go via userland */
	if (restart)
		goto restart_syscall;
	return State;
}

TBIRES switchx_handler(TBIRES State, int SigNum, int Triggers,
		       int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;

	/*
	 * This can be caused by any user process simply executing an unusual
	 * SWITCH instruction. If there's no DA, __TBIUnExpXXX will cause the
	 * thread to stop, so signal a SIGTRAP instead.
	 */
	head_end(State, ~INTS_OFF_MASK);
	if (user_mode(regs))
		force_sig(SIGTRAP, current);
	else
		State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, pTBI);
	return tail_end(State);
}

#ifdef CONFIG_METAG_META21
TBIRES fpe_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	unsigned int error_state = Triggers;
	siginfo_t info;

	head_end(State, ~INTS_OFF_MASK);

	local_irq_enable();

	info.si_signo = SIGFPE;

	if (error_state & TXSTAT_FPE_INVALID_BIT)
		info.si_code = FPE_FLTINV;
	else if (error_state & TXSTAT_FPE_DIVBYZERO_BIT)
		info.si_code = FPE_FLTDIV;
	else if (error_state & TXSTAT_FPE_OVERFLOW_BIT)
		info.si_code = FPE_FLTOVF;
	else if (error_state & TXSTAT_FPE_UNDERFLOW_BIT)
		info.si_code = FPE_FLTUND;
	else if (error_state & TXSTAT_FPE_INEXACT_BIT)
		info.si_code = FPE_FLTRES;
	else
		info.si_code = 0;
	info.si_errno = 0;
	info.si_addr = (__force void __user *)regs->ctx.CurrPC;
	force_sig_info(SIGFPE, &info, current);

	return tail_end(State);
}
#endif

#ifdef CONFIG_METAG_SUSPEND_MEM
struct traps_context {
	PTBIAPIFN fnSigs[TBID_SIGNUM_MAX + 1];
};

static struct traps_context *metag_traps_context;

int traps_save_context(void)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);
	struct traps_context *context;

	context = kzalloc(sizeof(*context), GFP_ATOMIC);
	if (!context)
		return -ENOMEM;

	memcpy(context->fnSigs, (void *)_pTBI->fnSigs, sizeof(context->fnSigs));

	metag_traps_context = context;
	return 0;
}

int traps_restore_context(void)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);
	struct traps_context *context = metag_traps_context;

	metag_traps_context = NULL;

	memcpy((void *)_pTBI->fnSigs, context->fnSigs, sizeof(context->fnSigs));

	kfree(context);
	return 0;
}
#endif

#ifdef CONFIG_SMP
static inline unsigned int _get_trigger_mask(void)
{
	unsigned long cpu = smp_processor_id();
	return per_cpu(trigger_mask, cpu);
}

unsigned int get_trigger_mask(void)
{
	return _get_trigger_mask();
}
EXPORT_SYMBOL(get_trigger_mask);

static void set_trigger_mask(unsigned int mask)
{
	unsigned long cpu = smp_processor_id();
	per_cpu(trigger_mask, cpu) = mask;
}

void arch_local_irq_enable(void)
{
	preempt_disable();
	arch_local_irq_restore(_get_trigger_mask());
	preempt_enable_no_resched();
}
EXPORT_SYMBOL(arch_local_irq_enable);
#else
static void set_trigger_mask(unsigned int mask)
{
	global_trigger_mask = mask;
}
#endif

void per_cpu_trap_init(unsigned long cpu)
{
	TBIRES int_context;
	unsigned int thread = cpu_2_hwthread_id[cpu];

	set_trigger_mask(TBI_INTS_INIT(thread) | /* interrupts */
			 TBI_TRIG_BIT(TBID_SIGNUM_LWK) | /* low level kick */
			 TBI_TRIG_BIT(TBID_SIGNUM_SW1));

	/* non-priv - use current stack */
	int_context.Sig.pCtx = NULL;
	/* Start with interrupts off */
	int_context.Sig.TrigMask = INTS_OFF_MASK;
	int_context.Sig.SaveMask = 0;

	/* And call __TBIASyncTrigger() */
	__TBIASyncTrigger(int_context);
}

void __init trap_init(void)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	_pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_LWK] = kick_handler;

#ifdef CONFIG_METAG_META21
	_pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR;
	_pTBI->fnSigs[TBID_SIGNUM_FPE] = fpe_handler;
#endif

	per_cpu_trap_init(cpu);
}

void tbi_startup_interrupt(int irq)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	BUG_ON(irq > TBID_SIGNUM_MAX);

	/* For TR1 and TR2, the thread id is encoded in the irq number */
	if (irq >= TBID_SIGNUM_T10 && irq < TBID_SIGNUM_TR3)
		cpu = hwthread_id_2_cpu[(irq - TBID_SIGNUM_T10) % 4];

	set_trigger_mask(get_trigger_mask() | TBI_TRIG_BIT(irq));

	_pTBI->fnSigs[irq] = trigger_handler;
}

void tbi_shutdown_interrupt(int irq)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	BUG_ON(irq > TBID_SIGNUM_MAX);

	set_trigger_mask(get_trigger_mask() & ~TBI_TRIG_BIT(irq));

	_pTBI->fnSigs[irq] = __TBIUnExpXXX;
}

int ret_from_fork(TBIRES arg)
{
	struct task_struct *prev = arg.Switch.pPara;
	struct task_struct *tsk = current;
	struct pt_regs *regs = task_pt_regs(tsk);
	int (*fn)(void *);
	TBIRES Next;

	schedule_tail(prev);

	if (tsk->flags & PF_KTHREAD) {
		fn = (void *)regs->ctx.DX[4].U1;
		BUG_ON(!fn);

		fn((void *)regs->ctx.DX[3].U1);
	}

	if (test_syscall_work())
		syscall_trace_leave(regs);

	preempt_disable();

	Next.Sig.TrigMask = get_trigger_mask();
	Next.Sig.SaveMask = 0;
	Next.Sig.pCtx = &regs->ctx;

	set_gateway_tls(current->thread.tls_ptr);

	preempt_enable_no_resched();

	/* And interrupts should come back on when we resume the real usermode
	 * code. Call __TBIASyncResume()
	 */
	__TBIASyncResume(tail_end(Next));
	/* ASyncResume should NEVER return */
	BUG();
	return 0;
}

void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs)
{
	unsigned long addr;
#ifdef CONFIG_FRAME_POINTER
	unsigned long fp, fpnew;
	unsigned long stack;
#endif

	if (regs && user_mode(regs))
		return;

	printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif

	if (!tsk)
		tsk = current;

#ifdef CONFIG_FRAME_POINTER
	if (regs) {
		print_ip_sym(regs->ctx.CurrPC);
		fp = regs->ctx.AX[1].U0;
	} else {
		fp = __core_reg_get(A0FrP);
	}

	/* Detect when the frame pointer has been used for other purposes and
	 * doesn't point to the stack (it may point completely elsewhere,
	 * which kstack_end may not detect).
	 */
	stack = (unsigned long)task_stack_page(tsk);
	while (fp >= stack && fp + 8 <= stack + THREAD_SIZE) {
		addr = __raw_readl((unsigned long *)(fp + 4)) - 4;
		if (kernel_text_address(addr))
			print_ip_sym(addr);
		else
			break;
		/* stack grows up, so frame pointers must decrease */
		fpnew = __raw_readl((unsigned long *)(fp + 0));
		if (fpnew >= fp)
			break;
		fp = fpnew;
	}
#else
	while (!kstack_end(sp)) {
		addr = (*sp--) - 4;
		if (kernel_text_address(addr))
			print_ip_sym(addr);
	}
#endif

	printk("\n");

	debug_show_held_locks(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	if (!tsk)
		tsk = current;
	if (tsk == current)
		sp = (unsigned long *)current_stack_pointer;
	else
		sp = (unsigned long *)tsk->thread.kernel_context->AX[0].U0;

	show_trace(tsk, sp, NULL);
}