// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/module.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>

#include <asm/proc-fns.h>
#include <asm/unistd.h>

#include <linux/ptrace.h>
#include <nds32_intrinsic.h>

extern void show_pte(struct mm_struct *mm, unsigned long addr);

/*
 * Dump out the contents of some memory nicely...
 */
void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
{
        unsigned long first;
        mm_segment_t fs;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space. Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top);

        for (first = bottom & ~31; first < top; first += 32) {
                unsigned long p;
                char str[sizeof(" 12345678") * 8 + 1];

                memset(str, ' ', sizeof(str));
                str[sizeof(str) - 1] = '\0';

                for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
                        if (p >= bottom && p < top) {
                                unsigned long val;

                                if (__get_user(val, (unsigned long *)p) == 0)
                                        sprintf(str + i * 9, " %08lx", val);
                                else
                                        sprintf(str + i * 9, " ????????");
                        }
                }
                pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str);
        }

        set_fs(fs);
}

EXPORT_SYMBOL(dump_mem);

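/*
 * Dump the instruction words around the faulting PC: the four words
 * before it plus the faulting word itself (printed in parentheses).
 * Note that the early "return" below currently short-circuits the
 * whole function, so no code dump is actually produced.
 */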
static void dump_instr(struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        mm_segment_t fs;
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

        return;
        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space. Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        pr_emerg("Code: ");
        for (i = -4; i < 1; i++) {
                unsigned int val, bad;

                bad = __get_user(val, &((u32 *) addr)[i]);

                if (!bad) {
                        p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
                } else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }
        pr_emerg("Code: %s\n", str);

        set_fs(fs);
}

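/*
 * With the function graph tracer enabled, return addresses on the stack
 * may have been replaced by return_to_handler.  Look up the original
 * return address on the task's ftrace return stack so that backtraces
 * stay meaningful.
 */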
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#include <linux/ftrace.h>
static void
get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
{
        if (*addr == (unsigned long)return_to_handler) {
                int index = tsk->curr_ret_stack;

                if (tsk->ret_stack && index >= *graph) {
                        index -= *graph;
                        *addr = tsk->ret_stack[index].ret;
                        (*graph)++;
                }
        }
}
#else
static inline void
get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
{
}
#endif

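/*
 * Print a backtrace for @tsk starting from @base_reg.  Without
 * CONFIG_FRAME_POINTER the stack is scanned word by word for kernel
 * text addresses; with frame pointers the $fp chain is followed
 * instead.  LOOP_TIMES bounds the number of entries printed.
 */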
#define LOOP_TIMES (100)
static void __dump(struct task_struct *tsk, unsigned long *base_reg)
{
        unsigned long ret_addr;
        int cnt = LOOP_TIMES, graph = 0;

        pr_emerg("Call Trace:\n");
        if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
                while (!kstack_end(base_reg)) {
                        ret_addr = *base_reg++;
                        if (__kernel_text_address(ret_addr)) {
                                get_real_ret_addr(&ret_addr, tsk, &graph);
                                print_ip_sym(ret_addr);
                        }
                        if (--cnt < 0)
                                break;
                }
        } else {
                while (!kstack_end((void *)base_reg) &&
                       !((unsigned long)base_reg & 0x3) &&
                       ((unsigned long)base_reg >= TASK_SIZE)) {
                        unsigned long next_fp;
#if !defined(NDS32_ABI_2)
                        ret_addr = base_reg[0];
                        next_fp = base_reg[1];
#else
                        ret_addr = base_reg[-1];
                        next_fp = base_reg[FP_OFFSET];
#endif
                        if (__kernel_text_address(ret_addr)) {
                                get_real_ret_addr(&ret_addr, tsk, &graph);
                                print_ip_sym(ret_addr);
                        }
                        if (--cnt < 0)
                                break;
                        base_reg = (unsigned long *)next_fp;
                }
        }
        pr_emerg("\n");
}

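/*
 * Dump the current context's call trace: read the live $sp (or $fp
 * when frame pointers are enabled) and hand it to __dump().
 */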
void dump_stack(void)
{
        unsigned long *base_reg;

        if (!IS_ENABLED(CONFIG_FRAME_POINTER))
                __asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg));
        else
                __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
        __dump(NULL, base_reg);
}

EXPORT_SYMBOL(dump_stack);

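/*
 * As dump_stack(), but for an arbitrary task: for a task other than
 * current, start from the stack/frame pointer saved in its cpu_context
 * rather than from the live registers.
 */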
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        unsigned long *base_reg;

        if (!tsk)
                tsk = current;
        if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
                if (tsk != current)
                        base_reg = (unsigned long *)(tsk->thread.cpu_context.sp);
                else
                        __asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg));
        } else {
                if (tsk != current)
                        base_reg = (unsigned long *)(tsk->thread.cpu_context.fp);
                else
                        __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
        }
        __dump(tsk, base_reg);
        barrier();
}

DEFINE_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        struct task_struct *tsk = current;
        static int die_counter;

        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);

        pr_emerg("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
        print_modules();
        pr_emerg("CPU: %i\n", smp_processor_id());
        show_regs(regs);
        pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
                 tsk->comm, tsk->pid, task_thread_info(tsk) + 1);

        if (!user_mode(regs) || in_interrupt()) {
                dump_mem("Stack: ", regs->sp,
                         THREAD_SIZE + (unsigned long)task_thread_info(tsk));
                dump_instr(regs);
                dump_stack();
        }

        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

EXPORT_SYMBOL(die);

void die_if_kernel(const char *str, struct pt_regs *regs, int err)
{
        if (user_mode(regs))
                return;

        die(str, regs, err);
}

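/*
 * Called for syscall numbers the kernel does not recognize.  Tasks with
 * a non-Linux personality just get SIGSEGV; otherwise SIGILL is
 * delivered with si_addr set to the saved PC minus 4, and
 * regs->uregs[0] is returned as the syscall result.
 */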
int bad_syscall(int n, struct pt_regs *regs)
{
        siginfo_t info;

        if (current->personality != PER_LINUX) {
                send_sig(SIGSEGV, current, 1);
                return regs->uregs[0];
        }

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_ILLTRP;
        info.si_addr = (void __user *)instruction_pointer(regs) - 4;

        force_sig_info(SIGILL, &info, current);
        die_if_kernel("Oops - bad syscall", regs, n);
        return regs->uregs[0];
}

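/*
 * Log bad page table entry values together with the file and line of
 * the caller that spotted them.
 */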
void __pte_error(const char *file, int line, unsigned long val)
{
        pr_emerg("%s:%d: bad pte %08lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
        pr_emerg("%s:%d: bad pmd %08lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
        pr_emerg("%s:%d: bad pgd %08lx.\n", file, line, val);
}

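/*
 * early_trap_init() copies the exception vector table from
 * exception_vector[] to the base of the kernel mapping (PAGE_OFFSET),
 * checks the IVB.NIVIC field and panics when the platform's interrupt
 * controller configuration is unsupported, programs IVB with 16-byte
 * vector spacing, loads the initial interrupt mask, and finally writes
 * back and invalidates the page that now holds the vectors.
 * trap_init() is intentionally empty.
 */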
extern char *exception_vector, *exception_vector_end;
void __init trap_init(void)
{
        return;
}

void __init early_trap_init(void)
{
        unsigned long ivb = 0;
        unsigned long base = PAGE_OFFSET;

        memcpy((unsigned long *)base, (unsigned long *)&exception_vector,
               ((unsigned long)&exception_vector_end -
                (unsigned long)&exception_vector));
        ivb = __nds32__mfsr(NDS32_SR_IVB);
        /* Check platform support. */
        if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) < 2)
                panic("IVIC mode is not allowed on the platform with interrupt controller\n");
        __nds32__mtsr((ivb & ~IVB_mskESZ) | (IVB_valESZ16 << IVB_offESZ) |
                      IVB_BASE, NDS32_SR_IVB);
        __nds32__mtsr(INT_MASK_INITAIAL_VAL, NDS32_SR_INT_MASK);

        /*
         * 0x800 = 128 vectors * 16 bytes.
         * It should be enough to flush a page.
         */
        cpu_cache_wbinval_page(base, true);
}

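/*
 * Record the debug trap in the task's thread struct and deliver SIGTRAP
 * with the faulting instruction address as si_addr.
 */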
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
                  int error_code, int si_code)
{
        struct siginfo info;

        tsk->thread.trap_no = ENTRY_DEBUG_RELATED;
        tsk->thread.error_code = error_code;

        memset(&info, 0, sizeof(info));
        info.si_signo = SIGTRAP;
        info.si_code = si_code;
        info.si_addr = (void __user *)instruction_pointer(regs);
        force_sig_info(SIGTRAP, &info, tsk);
}

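/*
 * Debug trap handler: give the die notifier chain a chance first, then
 * send SIGTRAP/TRAP_BRKPT to user space, or try an exception fixup in
 * kernel mode and die() if none applies.
 */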
void do_debug_trap(unsigned long entry, unsigned long addr,
                   unsigned long type, struct pt_regs *regs)
{
        if (notify_die(DIE_OOPS, "Oops", regs, addr, type, SIGTRAP)
            == NOTIFY_STOP)
                return;

        if (user_mode(regs)) {
                /* trap_signal */
                send_sigtrap(current, regs, 0, TRAP_BRKPT);
        } else {
                /* kernel_trap */
                if (!fixup_exception(regs))
                        die("unexpected kernel_trap", regs, 0);
        }
}

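/*
 * Fallback handlers for interrupts and exceptions nothing else claims:
 * log the event, dump the registers, and kill the offending context
 * (do_exit() in kernel mode, SIGKILL for a user task).
 */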
void unhandled_interruption(struct pt_regs *regs)
{
        siginfo_t si;

        pr_emerg("unhandled_interruption\n");
        show_regs(regs);
        if (!user_mode(regs))
                do_exit(SIGKILL);
        si.si_signo = SIGKILL;
        si.si_errno = 0;
        force_sig_info(SIGKILL, &si, current);
}

void unhandled_exceptions(unsigned long entry, unsigned long addr,
                          unsigned long type, struct pt_regs *regs)
{
        siginfo_t si;

        pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry,
                 addr, type);
        show_regs(regs);
        if (!user_mode(regs))
                do_exit(SIGKILL);
        si.si_signo = SIGKILL;
        si.si_errno = 0;
        si.si_addr = (void *)addr;
        force_sig_info(SIGKILL, &si, current);
}

extern int do_page_fault(unsigned long entry, unsigned long addr,
                         unsigned int error_code, struct pt_regs *regs);

/*
 * 2:DEF dispatch for TLB MISC exception handler
 */

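/*
 * ETYPE values below 5 in the TLB misc group are permission/page faults
 * and are handed to do_page_fault(); any other type is reported as an
 * unhandled exception.
 */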
void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr,
                          unsigned long type, struct pt_regs *regs)
{
        type = type & (ITYPE_mskINST | ITYPE_mskETYPE);
        if ((type & ITYPE_mskETYPE) < 5) {
                /* Permission exceptions */
                do_page_fault(entry, addr, type, regs);
        } else
                unhandled_exceptions(entry, addr, type, regs);
}

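/*
 * Reserved (undefined) instruction: fatal in kernel mode, SIGILL for
 * user space.
 */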
void do_revinsn(struct pt_regs *regs)
{
        siginfo_t si;

        pr_emerg("Reserved Instruction\n");
        show_regs(regs);
        if (!user_mode(regs))
                do_exit(SIGILL);
        si.si_signo = SIGILL;
        si.si_errno = 0;
        force_sig_info(SIGILL, &si, current);
}

#ifdef CONFIG_ALIGNMENT_TRAP
extern int unalign_access_mode;
extern int do_unaligned_access(unsigned long addr, struct pt_regs *regs);
#endif
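/*
 * General exception dispatcher.  Alignment faults may first be fixed up
 * by the software unalignment handler (CONFIG_ALIGNMENT_TRAP) before
 * falling back to do_page_fault(); reserved instructions go to
 * do_revinsn(); the SWID_RAISE_INTERRUPT_LEVEL trap is the debug trap
 * path, which restores the saved OIPC and clears PSW.DEX; everything
 * else is treated as an unhandled exception.
 */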
void do_dispatch_general(unsigned long entry, unsigned long addr,
                         unsigned long itype, struct pt_regs *regs,
                         unsigned long oipc)
{
        unsigned int swid = itype >> ITYPE_offSWID;
        unsigned long type = itype & (ITYPE_mskINST | ITYPE_mskETYPE);

        if (type == ETYPE_ALIGNMENT_CHECK) {
#ifdef CONFIG_ALIGNMENT_TRAP
                /* Alignment check */
                if (user_mode(regs) && unalign_access_mode) {
                        int ret;

                        ret = do_unaligned_access(addr, regs);

                        if (ret == 0)
                                return;

                        if (ret == -EFAULT)
                                pr_emerg("Unhandled unaligned access exception\n");
                }
#endif
                do_page_fault(entry, addr, type, regs);
        } else if (type == ETYPE_RESERVED_INSTRUCTION) {
                /* Reserved instruction */
                do_revinsn(regs);
        } else if (type == ETYPE_TRAP && swid == SWID_RAISE_INTERRUPT_LEVEL) {
                /* Trap, used as a workaround for v3 EDM target debugging */
                /*
                 * DIPC (OIPC) is passed as a parameter before interrupts
                 * are enabled, so the DIPC will not be corrupted even if
                 * interrupts come in.
                 */
                /*
                 * 1. update ipc
                 * 2. update pt_regs ipc with oipc
                 * 3. update pt_regs ipsw (clear DEX)
                 */
                __asm__ volatile ("mtsr %0, $IPC\n\t"::"r" (oipc));
                regs->ipc = oipc;
                if (regs->pipsw & PSW_mskDEX) {
                        pr_emerg("Nested debug exception may have occurred\n");
                        pr_emerg("ipc:%08x pipc:%08x\n",
                                 (unsigned int)regs->ipc,
                                 (unsigned int)regs->pipc);
                }
                do_debug_trap(entry, addr, itype, regs);
                regs->ipsw &= ~PSW_mskDEX;
        } else
                unhandled_exceptions(entry, addr, type, regs);
}