]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - arch/nds32/kernel/traps.c
nds32: add show_stack_loglvl()
[mirror_ubuntu-jammy-kernel.git] / arch / nds32 / kernel / traps.c
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2005-2017 Andes Technology Corporation
3
4 #include <linux/module.h>
5 #include <linux/personality.h>
6 #include <linux/kallsyms.h>
7 #include <linux/hardirq.h>
8 #include <linux/kdebug.h>
9 #include <linux/sched/task_stack.h>
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
12
13 #include <asm/proc-fns.h>
14 #include <asm/unistd.h>
15 #include <asm/fpu.h>
16
17 #include <linux/ptrace.h>
18 #include <nds32_intrinsic.h>
19
20 extern void show_pte(struct mm_struct *mm, unsigned long addr);
21
22 /*
23 * Dump out the contents of some memory nicely...
24 */
25 void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
26 {
27 unsigned long first;
28 mm_segment_t fs;
29 int i;
30
31 /*
32 * We need to switch to kernel mode so that we can use __get_user
33 * to safely read from kernel space. Note that we now dump the
34 * code first, just in case the backtrace kills us.
35 */
36 fs = get_fs();
37 set_fs(KERNEL_DS);
38
39 pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top);
40
41 for (first = bottom & ~31; first < top; first += 32) {
42 unsigned long p;
43 char str[sizeof(" 12345678") * 8 + 1];
44
45 memset(str, ' ', sizeof(str));
46 str[sizeof(str) - 1] = '\0';
47
48 for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
49 if (p >= bottom && p < top) {
50 unsigned long val;
51 if (__get_user(val, (unsigned long *)p) == 0)
52 sprintf(str + i * 9, " %08lx", val);
53 else
54 sprintf(str + i * 9, " ????????");
55 }
56 }
57 pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str);
58 }
59
60 set_fs(fs);
61 }
62
63 EXPORT_SYMBOL(dump_mem);
64
/*
 * Dump the machine code words around the faulting instruction pointer.
 *
 * NOTE(review): the bare "return" at the top disables this function
 * entirely -- everything below it is unreachable dead code.  It looks
 * deliberately stubbed out; confirm intent before re-enabling or
 * deleting the body.
 */
static void dump_instr(struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	mm_segment_t fs;
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	return;
	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space. Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	pr_emerg("Code: ");
	/* Four words before the PC, the PC word itself in parentheses. */
	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = __get_user(val, &((u32 *) addr)[i]);

		if (!bad) {
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		} else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}
	pr_emerg("Code: %s\n", str);

	set_fs(fs);
}
98
#define LOOP_TIMES (100)
/*
 * Print a call trace starting from @base_reg at printk level @loglvl.
 *
 * Without CONFIG_FRAME_POINTER every stack word is scanned and any
 * value that looks like a kernel text address is printed (this can
 * produce false positives).  With frame pointers the $fp chain is
 * followed instead: each frame holds the saved return address at
 * LP_OFFSET and the previous frame pointer at FP_OFFSET.  At most
 * LOOP_TIMES entries are emitted either way.
 */
static void __dump(struct task_struct *tsk, unsigned long *base_reg,
		   const char *loglvl)
{
	unsigned long ret_addr;
	int cnt = LOOP_TIMES, graph = 0;
	printk("%sCall Trace:\n", loglvl);
	if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
		while (!kstack_end(base_reg)) {
			ret_addr = *base_reg++;
			if (__kernel_text_address(ret_addr)) {
				/* Map through the function-graph tracer trampoline. */
				ret_addr = ftrace_graph_ret_addr(
						tsk, &graph, ret_addr, NULL);
				print_ip_sym(loglvl, ret_addr);
			}
			if (--cnt < 0)
				break;
		}
	} else {
		/* Stop at stack end, on a misaligned $fp, or a user-space value. */
		while (!kstack_end((void *)base_reg) &&
		       !((unsigned long)base_reg & 0x3) &&
		       ((unsigned long)base_reg >= TASK_SIZE)) {
			unsigned long next_fp;
			ret_addr = base_reg[LP_OFFSET];
			next_fp = base_reg[FP_OFFSET];
			if (__kernel_text_address(ret_addr)) {

				ret_addr = ftrace_graph_ret_addr(
						tsk, &graph, ret_addr, NULL);
				print_ip_sym(loglvl, ret_addr);
			}
			if (--cnt < 0)
				break;
			base_reg = (unsigned long *)next_fp;
		}
	}
	printk("%s\n", loglvl);
}
137
/*
 * Print a call trace for @tsk at printk level @loglvl.
 *
 * @sp is accepted for interface compatibility but is not used: the
 * walk always starts from the task's saved sp/fp in cpu_context, or
 * from the live register when @tsk is the current task.
 */
void show_stack_loglvl(struct task_struct *tsk, unsigned long *sp,
		       const char *loglvl)
{
	unsigned long *base_reg;

	if (!tsk)
		tsk = current;
	if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
		/* No frame pointers: scan words starting at the stack pointer. */
		if (tsk != current)
			base_reg = (unsigned long *)(tsk->thread.cpu_context.sp);
		else
			/* "ori %0, $sp, #0" ORs with zero, i.e. a register move. */
			__asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg));
	} else {
		/* Frame pointers available: walk the $fp chain instead. */
		if (tsk != current)
			base_reg = (unsigned long *)(tsk->thread.cpu_context.fp);
		else
			__asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
	}
	__dump(tsk, base_reg, loglvl);
	barrier();
}
159
/* Generic show_stack() entry point; always logs at KERN_EMERG. */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	show_stack_loglvl(tsk, sp, KERN_EMERG);
}
164
DEFINE_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 *
 * Print a full oops report (modules, registers, stack and code dump)
 * under die_lock and terminate the current task.  Does not return.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	struct task_struct *tsk = current;
	static int die_counter;	/* distinguishes successive oopses in the log */

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);	/* make sure the messages reach the console */

	pr_emerg("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
	print_modules();
	pr_emerg("CPU: %i\n", smp_processor_id());
	show_regs(regs);
	pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
		 tsk->comm, tsk->pid, end_of_stack(tsk));

	/* Only dump raw stack and code when faulting in kernel context. */
	if (!user_mode(regs) || in_interrupt()) {
		dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);
		dump_instr(regs);
		dump_stack();
	}

	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);	/* does not return */
}

EXPORT_SYMBOL(die);
198
/*
 * Oops out via die() when the fault happened in kernel mode; a
 * user-mode fault is simply ignored here and handled by the caller's
 * signal path.
 */
void die_if_kernel(const char *str, struct pt_regs *regs, int err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}
206
207 int bad_syscall(int n, struct pt_regs *regs)
208 {
209 if (current->personality != PER_LINUX) {
210 send_sig(SIGSEGV, current, 1);
211 return regs->uregs[0];
212 }
213
214 force_sig_fault(SIGILL, ILL_ILLTRP,
215 (void __user *)instruction_pointer(regs) - 4);
216 die_if_kernel("Oops - bad syscall", regs, n);
217 return regs->uregs[0];
218 }
219
/* Report a corrupt PTE value, tagged with the caller's file:line. */
void __pte_error(const char *file, int line, unsigned long val)
{
	pr_emerg("%s:%d: bad pte %08lx.\n", file, line, val);
}
224
/* Report a corrupt PMD value, tagged with the caller's file:line. */
void __pmd_error(const char *file, int line, unsigned long val)
{
	pr_emerg("%s:%d: bad pmd %08lx.\n", file, line, val);
}
229
/* Report a corrupt PGD value, tagged with the caller's file:line. */
void __pgd_error(const char *file, int line, unsigned long val)
{
	pr_emerg("%s:%d: bad pgd %08lx.\n", file, line, val);
}
234
235 extern char *exception_vector, *exception_vector_end;
236 void __init trap_init(void)
237 {
238 return;
239 }
240
/*
 * Install the exception vector table and program the interruption
 * vector base register.  Called early in boot, before interrupts are
 * usable.
 */
void __init early_trap_init(void)
{
	unsigned long ivb = 0;
	unsigned long base = PAGE_OFFSET;

	/* Copy the vector table to the start of the kernel mapping. */
	memcpy((unsigned long *)base, (unsigned long *)&exception_vector,
	       ((unsigned long)&exception_vector_end -
		(unsigned long)&exception_vector));
	ivb = __nds32__mfsr(NDS32_SR_IVB);
	/* Check platform support. */
	if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) < 2)
		panic
		    ("IVIC mode is not allowed on the platform with interrupt controller\n");
	/* Select 16-byte vector entries and set the vector base address. */
	__nds32__mtsr((ivb & ~IVB_mskESZ) | (IVB_valESZ16 << IVB_offESZ) |
		      IVB_BASE, NDS32_SR_IVB);
	__nds32__mtsr(INT_MASK_INITAIAL_VAL, NDS32_SR_INT_MASK);

	/*
	 * 0x800 = 128 vectors * 16byte.
	 * It should be enough to flush a page.
	 */
	cpu_cache_wbinval_page(base, true);
}
264
/*
 * Deliver SIGTRAP to the current task, recording the debug trap
 * number and @error_code in the thread struct and pointing si_addr
 * at the trapping instruction.
 */
static void send_sigtrap(struct pt_regs *regs, int error_code, int si_code)
{
	struct task_struct *tsk = current;

	tsk->thread.trap_no = ENTRY_DEBUG_RELATED;
	tsk->thread.error_code = error_code;

	force_sig_fault(SIGTRAP, si_code,
			(void __user *)instruction_pointer(regs));
}
275
276 void do_debug_trap(unsigned long entry, unsigned long addr,
277 unsigned long type, struct pt_regs *regs)
278 {
279 if (notify_die(DIE_OOPS, "Oops", regs, addr, type, SIGTRAP)
280 == NOTIFY_STOP)
281 return;
282
283 if (user_mode(regs)) {
284 /* trap_signal */
285 send_sigtrap(regs, 0, TRAP_BRKPT);
286 } else {
287 /* kernel_trap */
288 if (!fixup_exception(regs))
289 die("unexpected kernel_trap", regs, 0);
290 }
291 }
292
/*
 * Fallback for an interruption with no registered handler: log the
 * register state, then kill the kernel context outright or SIGKILL
 * the user task.
 */
void unhandled_interruption(struct pt_regs *regs)
{
	pr_emerg("unhandled_interruption\n");
	show_regs(regs);
	if (!user_mode(regs))
		do_exit(SIGKILL);	/* does not return */
	force_sig(SIGKILL);
}
301
/*
 * Fallback for an exception with no dedicated handler: log the entry
 * point, faulting address and interruption type, then kill the
 * kernel context outright or SIGKILL the user task.
 */
void unhandled_exceptions(unsigned long entry, unsigned long addr,
			  unsigned long type, struct pt_regs *regs)
{
	pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry,
		 addr, type);
	show_regs(regs);
	if (!user_mode(regs))
		do_exit(SIGKILL);	/* does not return */
	force_sig(SIGKILL);
}
312
extern int do_page_fault(unsigned long entry, unsigned long addr,
			 unsigned int error_code, struct pt_regs *regs);

/*
 * 2:DEF dispatch for TLB MISC exception handler
 */

/*
 * NOTE(review): exception subtypes below 5 are treated as the
 * permission/page-fault class and forwarded to do_page_fault();
 * everything else is unhandled.  Confirm the cutoff against the
 * ITYPE encoding in the architecture manual.
 */
void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr,
			  unsigned long type, struct pt_regs *regs)
{
	type = type & (ITYPE_mskINST | ITYPE_mskETYPE);
	if ((type & ITYPE_mskETYPE) < 5) {
		/* Permission exceptions */
		do_page_fault(entry, addr, type, regs);
	} else
		unhandled_exceptions(entry, addr, type, regs);
}
330
/*
 * Reserved-instruction exception: log the register state, then kill
 * the kernel context outright or SIGILL the user task.
 */
void do_revinsn(struct pt_regs *regs)
{
	pr_emerg("Reserved Instruction\n");
	show_regs(regs);
	if (!user_mode(regs))
		do_exit(SIGILL);	/* does not return */
	force_sig(SIGILL);
}
339
#ifdef CONFIG_ALIGNMENT_TRAP
extern int unalign_access_mode;
extern int do_unaligned_access(unsigned long addr, struct pt_regs *regs);
#endif
/*
 * First-level dispatcher for "general" exceptions: decode the
 * interruption type word @itype and route to the alignment fixup,
 * reserved-instruction, coprocessor/FPU or debug-trap handlers.
 * Anything unrecognised falls through to unhandled_exceptions().
 */
void do_dispatch_general(unsigned long entry, unsigned long addr,
			 unsigned long itype, struct pt_regs *regs,
			 unsigned long oipc)
{
	unsigned int swid = itype >> ITYPE_offSWID;	/* software ID of a trap insn */
	unsigned long type = itype & (ITYPE_mskINST | ITYPE_mskETYPE);
	if (type == ETYPE_ALIGNMENT_CHECK) {
#ifdef CONFIG_ALIGNMENT_TRAP
		/* Alignment check: try software emulation for user mode first. */
		if (user_mode(regs) && unalign_access_mode) {
			int ret;
			ret = do_unaligned_access(addr, regs);

			if (ret == 0)
				return;

			if (ret == -EFAULT)
				pr_emerg
				    ("Unhandled unaligned access exception\n");
		}
#endif
		/* Not emulated (or emulation failed): treat as a page fault. */
		do_page_fault(entry, addr, type, regs);
	} else if (type == ETYPE_RESERVED_INSTRUCTION) {
		/* Reserved instruction */
		do_revinsn(regs);
	} else if (type == ETYPE_COPROCESSOR) {
		/* Coprocessor */
#if IS_ENABLED(CONFIG_FPU)
		/* Only CP0-configured-as-FPU exceptions are handled here. */
		unsigned int fucop_exist = __nds32__mfsr(NDS32_SR_FUCOP_EXIST);
		unsigned int cpid = ((itype & ITYPE_mskCPID) >> ITYPE_offCPID);

		if ((cpid == FPU_CPID) &&
		    (fucop_exist & FUCOP_EXIST_mskCP0ISFPU)) {
			unsigned int subtype = (itype & ITYPE_mskSTYPE);

			if (true == do_fpu_exception(subtype, regs))
				return;
		}
#endif
		unhandled_exceptions(entry, addr, type, regs);
	} else if (type == ETYPE_TRAP && swid == SWID_RAISE_INTERRUPT_LEVEL) {
		/* trap, used on v3 EDM target debugging workaround */
		/*
		 * DIPC(OIPC) is passed as parameter before
		 * interrupt is enabled, so the DIPC will not be corrupted
		 * even though interrupts are coming in
		 */
		/*
		 * 1. update ipc
		 * 2. update pt_regs ipc with oipc
		 * 3. update pt_regs ipsw (clear DEX)
		 */
		__asm__ volatile ("mtsr %0, $IPC\n\t"::"r" (oipc));
		regs->ipc = oipc;
		if (regs->pipsw & PSW_mskDEX) {
			pr_emerg
			    ("Nested Debug exception is possibly happened\n");
			pr_emerg("ipc:%08x pipc:%08x\n",
				 (unsigned int)regs->ipc,
				 (unsigned int)regs->pipc);
		}
		do_debug_trap(entry, addr, itype, regs);
		regs->ipsw &= ~PSW_mskDEX;
	} else
		unhandled_exceptions(entry, addr, type, regs);
}