// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
3 | ||
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/sched/task_stack.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>

#include <asm/proc-fns.h>
#include <asm/unistd.h>

#include <nds32_intrinsic.h>
17 | ||
18 | extern void show_pte(struct mm_struct *mm, unsigned long addr); | |
19 | ||
20 | /* | |
21 | * Dump out the contents of some memory nicely... | |
22 | */ | |
23 | void dump_mem(const char *lvl, unsigned long bottom, unsigned long top) | |
24 | { | |
25 | unsigned long first; | |
26 | mm_segment_t fs; | |
27 | int i; | |
28 | ||
29 | /* | |
30 | * We need to switch to kernel mode so that we can use __get_user | |
31 | * to safely read from kernel space. Note that we now dump the | |
32 | * code first, just in case the backtrace kills us. | |
33 | */ | |
34 | fs = get_fs(); | |
35 | set_fs(KERNEL_DS); | |
36 | ||
37 | pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top); | |
38 | ||
39 | for (first = bottom & ~31; first < top; first += 32) { | |
40 | unsigned long p; | |
41 | char str[sizeof(" 12345678") * 8 + 1]; | |
42 | ||
43 | memset(str, ' ', sizeof(str)); | |
44 | str[sizeof(str) - 1] = '\0'; | |
45 | ||
46 | for (p = first, i = 0; i < 8 && p < top; i++, p += 4) { | |
47 | if (p >= bottom && p < top) { | |
48 | unsigned long val; | |
49 | if (__get_user(val, (unsigned long *)p) == 0) | |
50 | sprintf(str + i * 9, " %08lx", val); | |
51 | else | |
52 | sprintf(str + i * 9, " ????????"); | |
53 | } | |
54 | } | |
55 | pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str); | |
56 | } | |
57 | ||
58 | set_fs(fs); | |
59 | } | |
60 | ||
61 | EXPORT_SYMBOL(dump_mem); | |
62 | ||
63 | static void dump_instr(struct pt_regs *regs) | |
64 | { | |
65 | unsigned long addr = instruction_pointer(regs); | |
66 | mm_segment_t fs; | |
67 | char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; | |
68 | int i; | |
69 | ||
70 | return; | |
71 | /* | |
72 | * We need to switch to kernel mode so that we can use __get_user | |
73 | * to safely read from kernel space. Note that we now dump the | |
74 | * code first, just in case the backtrace kills us. | |
75 | */ | |
76 | fs = get_fs(); | |
77 | set_fs(KERNEL_DS); | |
78 | ||
79 | pr_emerg("Code: "); | |
80 | for (i = -4; i < 1; i++) { | |
81 | unsigned int val, bad; | |
82 | ||
83 | bad = __get_user(val, &((u32 *) addr)[i]); | |
84 | ||
85 | if (!bad) { | |
86 | p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val); | |
87 | } else { | |
88 | p += sprintf(p, "bad PC value"); | |
89 | break; | |
90 | } | |
91 | } | |
92 | pr_emerg("Code: %s\n", str); | |
93 | ||
94 | set_fs(fs); | |
95 | } | |
96 | ||
97 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
98 | #include <linux/ftrace.h> | |
99 | static void | |
100 | get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph) | |
101 | { | |
102 | if (*addr == (unsigned long)return_to_handler) { | |
103 | int index = tsk->curr_ret_stack; | |
104 | ||
105 | if (tsk->ret_stack && index >= *graph) { | |
106 | index -= *graph; | |
107 | *addr = tsk->ret_stack[index].ret; | |
108 | (*graph)++; | |
109 | } | |
110 | } | |
111 | } | |
112 | #else | |
113 | static inline void | |
114 | get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph) | |
115 | { | |
116 | } | |
117 | #endif | |
118 | ||
119 | #define LOOP_TIMES (100) | |
120 | static void __dump(struct task_struct *tsk, unsigned long *base_reg) | |
121 | { | |
122 | unsigned long ret_addr; | |
123 | int cnt = LOOP_TIMES, graph = 0; | |
124 | pr_emerg("Call Trace:\n"); | |
125 | if (!IS_ENABLED(CONFIG_FRAME_POINTER)) { | |
126 | while (!kstack_end(base_reg)) { | |
127 | ret_addr = *base_reg++; | |
128 | if (__kernel_text_address(ret_addr)) { | |
129 | get_real_ret_addr(&ret_addr, tsk, &graph); | |
130 | print_ip_sym(ret_addr); | |
131 | } | |
132 | if (--cnt < 0) | |
133 | break; | |
134 | } | |
135 | } else { | |
136 | while (!kstack_end((void *)base_reg) && | |
137 | !((unsigned long)base_reg & 0x3) && | |
138 | ((unsigned long)base_reg >= TASK_SIZE)) { | |
139 | unsigned long next_fp; | |
140 | #if !defined(NDS32_ABI_2) | |
141 | ret_addr = base_reg[0]; | |
142 | next_fp = base_reg[1]; | |
143 | #else | |
144 | ret_addr = base_reg[-1]; | |
145 | next_fp = base_reg[FP_OFFSET]; | |
146 | #endif | |
147 | if (__kernel_text_address(ret_addr)) { | |
148 | get_real_ret_addr(&ret_addr, tsk, &graph); | |
149 | print_ip_sym(ret_addr); | |
150 | } | |
151 | if (--cnt < 0) | |
152 | break; | |
153 | base_reg = (unsigned long *)next_fp; | |
154 | } | |
155 | } | |
156 | pr_emerg("\n"); | |
157 | } | |
158 | ||
2923f5ea GH |
159 | void show_stack(struct task_struct *tsk, unsigned long *sp) |
160 | { | |
161 | unsigned long *base_reg; | |
162 | ||
163 | if (!tsk) | |
164 | tsk = current; | |
165 | if (!IS_ENABLED(CONFIG_FRAME_POINTER)) { | |
166 | if (tsk != current) | |
167 | base_reg = (unsigned long *)(tsk->thread.cpu_context.sp); | |
168 | else | |
169 | __asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg)); | |
170 | } else { | |
171 | if (tsk != current) | |
172 | base_reg = (unsigned long *)(tsk->thread.cpu_context.fp); | |
173 | else | |
174 | __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg)); | |
175 | } | |
176 | __dump(tsk, base_reg); | |
177 | barrier(); | |
178 | } | |
179 | ||
180 | DEFINE_SPINLOCK(die_lock); | |
181 | ||
182 | /* | |
183 | * This function is protected against re-entrancy. | |
184 | */ | |
185 | void die(const char *str, struct pt_regs *regs, int err) | |
186 | { | |
187 | struct task_struct *tsk = current; | |
188 | static int die_counter; | |
189 | ||
190 | console_verbose(); | |
191 | spin_lock_irq(&die_lock); | |
192 | bust_spinlocks(1); | |
193 | ||
194 | pr_emerg("Internal error: %s: %x [#%d]\n", str, err, ++die_counter); | |
195 | print_modules(); | |
196 | pr_emerg("CPU: %i\n", smp_processor_id()); | |
197 | show_regs(regs); | |
198 | pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n", | |
199 | tsk->comm, tsk->pid, task_thread_info(tsk) + 1); | |
200 | ||
201 | if (!user_mode(regs) || in_interrupt()) { | |
202 | dump_mem("Stack: ", regs->sp, | |
203 | THREAD_SIZE + (unsigned long)task_thread_info(tsk)); | |
204 | dump_instr(regs); | |
205 | dump_stack(); | |
206 | } | |
207 | ||
208 | bust_spinlocks(0); | |
209 | spin_unlock_irq(&die_lock); | |
210 | do_exit(SIGSEGV); | |
211 | } | |
212 | ||
213 | EXPORT_SYMBOL(die); | |
214 | ||
215 | void die_if_kernel(const char *str, struct pt_regs *regs, int err) | |
216 | { | |
217 | if (user_mode(regs)) | |
218 | return; | |
219 | ||
220 | die(str, regs, err); | |
221 | } | |
222 | ||
223 | int bad_syscall(int n, struct pt_regs *regs) | |
224 | { | |
225 | siginfo_t info; | |
226 | ||
227 | if (current->personality != PER_LINUX) { | |
228 | send_sig(SIGSEGV, current, 1); | |
229 | return regs->uregs[0]; | |
230 | } | |
231 | ||
232 | info.si_signo = SIGILL; | |
233 | info.si_errno = 0; | |
234 | info.si_code = ILL_ILLTRP; | |
235 | info.si_addr = (void __user *)instruction_pointer(regs) - 4; | |
236 | ||
237 | force_sig_info(SIGILL, &info, current); | |
238 | die_if_kernel("Oops - bad syscall", regs, n); | |
239 | return regs->uregs[0]; | |
240 | } | |
241 | ||
242 | void __pte_error(const char *file, int line, unsigned long val) | |
243 | { | |
244 | pr_emerg("%s:%d: bad pte %08lx.\n", file, line, val); | |
245 | } | |
246 | ||
247 | void __pmd_error(const char *file, int line, unsigned long val) | |
248 | { | |
249 | pr_emerg("%s:%d: bad pmd %08lx.\n", file, line, val); | |
250 | } | |
251 | ||
252 | void __pgd_error(const char *file, int line, unsigned long val) | |
253 | { | |
254 | pr_emerg("%s:%d: bad pgd %08lx.\n", file, line, val); | |
255 | } | |
256 | ||
257 | extern char *exception_vector, *exception_vector_end; | |
258 | void __init trap_init(void) | |
259 | { | |
260 | return; | |
261 | } | |
262 | ||
263 | void __init early_trap_init(void) | |
264 | { | |
265 | unsigned long ivb = 0; | |
266 | unsigned long base = PAGE_OFFSET; | |
267 | ||
268 | memcpy((unsigned long *)base, (unsigned long *)&exception_vector, | |
269 | ((unsigned long)&exception_vector_end - | |
270 | (unsigned long)&exception_vector)); | |
271 | ivb = __nds32__mfsr(NDS32_SR_IVB); | |
272 | /* Check platform support. */ | |
273 | if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) < 2) | |
274 | panic | |
275 | ("IVIC mode is not allowed on the platform with interrupt controller\n"); | |
276 | __nds32__mtsr((ivb & ~IVB_mskESZ) | (IVB_valESZ16 << IVB_offESZ) | | |
277 | IVB_BASE, NDS32_SR_IVB); | |
278 | __nds32__mtsr(INT_MASK_INITAIAL_VAL, NDS32_SR_INT_MASK); | |
279 | ||
280 | /* | |
281 | * 0x800 = 128 vectors * 16byte. | |
282 | * It should be enough to flush a page. | |
283 | */ | |
284 | cpu_cache_wbinval_page(base, true); | |
285 | } | |
286 | ||
287 | void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, | |
288 | int error_code, int si_code) | |
289 | { | |
290 | struct siginfo info; | |
291 | ||
292 | tsk->thread.trap_no = ENTRY_DEBUG_RELATED; | |
293 | tsk->thread.error_code = error_code; | |
294 | ||
295 | memset(&info, 0, sizeof(info)); | |
296 | info.si_signo = SIGTRAP; | |
297 | info.si_code = si_code; | |
298 | info.si_addr = (void __user *)instruction_pointer(regs); | |
299 | force_sig_info(SIGTRAP, &info, tsk); | |
300 | } | |
301 | ||
302 | void do_debug_trap(unsigned long entry, unsigned long addr, | |
303 | unsigned long type, struct pt_regs *regs) | |
304 | { | |
305 | if (notify_die(DIE_OOPS, "Oops", regs, addr, type, SIGTRAP) | |
306 | == NOTIFY_STOP) | |
307 | return; | |
308 | ||
309 | if (user_mode(regs)) { | |
310 | /* trap_signal */ | |
311 | send_sigtrap(current, regs, 0, TRAP_BRKPT); | |
312 | } else { | |
313 | /* kernel_trap */ | |
314 | if (!fixup_exception(regs)) | |
315 | die("unexpected kernel_trap", regs, 0); | |
316 | } | |
317 | } | |
318 | ||
319 | void unhandled_interruption(struct pt_regs *regs) | |
320 | { | |
2923f5ea GH |
321 | pr_emerg("unhandled_interruption\n"); |
322 | show_regs(regs); | |
323 | if (!user_mode(regs)) | |
324 | do_exit(SIGKILL); | |
be5c2ff0 | 325 | force_sig(SIGKILL, current); |
2923f5ea GH |
326 | } |
327 | ||
328 | void unhandled_exceptions(unsigned long entry, unsigned long addr, | |
329 | unsigned long type, struct pt_regs *regs) | |
330 | { | |
2923f5ea GH |
331 | pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry, |
332 | addr, type); | |
333 | show_regs(regs); | |
334 | if (!user_mode(regs)) | |
335 | do_exit(SIGKILL); | |
be5c2ff0 | 336 | force_sig(SIGKILL, current); |
2923f5ea GH |
337 | } |
338 | ||
339 | extern int do_page_fault(unsigned long entry, unsigned long addr, | |
340 | unsigned int error_code, struct pt_regs *regs); | |
341 | ||
342 | /* | |
343 | * 2:DEF dispatch for TLB MISC exception handler | |
344 | */ | |
345 | ||
346 | void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr, | |
347 | unsigned long type, struct pt_regs *regs) | |
348 | { | |
349 | type = type & (ITYPE_mskINST | ITYPE_mskETYPE); | |
350 | if ((type & ITYPE_mskETYPE) < 5) { | |
351 | /* Permission exceptions */ | |
352 | do_page_fault(entry, addr, type, regs); | |
353 | } else | |
354 | unhandled_exceptions(entry, addr, type, regs); | |
355 | } | |
356 | ||
357 | void do_revinsn(struct pt_regs *regs) | |
358 | { | |
359 | siginfo_t si; | |
360 | pr_emerg("Reserved Instruction\n"); | |
361 | show_regs(regs); | |
362 | if (!user_mode(regs)) | |
363 | do_exit(SIGILL); | |
364 | si.si_signo = SIGILL; | |
365 | si.si_errno = 0; | |
366 | force_sig_info(SIGILL, &si, current); | |
367 | } | |
368 | ||
369 | #ifdef CONFIG_ALIGNMENT_TRAP | |
370 | extern int unalign_access_mode; | |
371 | extern int do_unaligned_access(unsigned long addr, struct pt_regs *regs); | |
372 | #endif | |
373 | void do_dispatch_general(unsigned long entry, unsigned long addr, | |
374 | unsigned long itype, struct pt_regs *regs, | |
375 | unsigned long oipc) | |
376 | { | |
377 | unsigned int swid = itype >> ITYPE_offSWID; | |
378 | unsigned long type = itype & (ITYPE_mskINST | ITYPE_mskETYPE); | |
379 | if (type == ETYPE_ALIGNMENT_CHECK) { | |
380 | #ifdef CONFIG_ALIGNMENT_TRAP | |
381 | /* Alignment check */ | |
382 | if (user_mode(regs) && unalign_access_mode) { | |
383 | int ret; | |
384 | ret = do_unaligned_access(addr, regs); | |
385 | ||
386 | if (ret == 0) | |
387 | return; | |
388 | ||
389 | if (ret == -EFAULT) | |
390 | pr_emerg | |
391 | ("Unhandled unaligned access exception\n"); | |
392 | } | |
393 | #endif | |
394 | do_page_fault(entry, addr, type, regs); | |
395 | } else if (type == ETYPE_RESERVED_INSTRUCTION) { | |
396 | /* Reserved instruction */ | |
397 | do_revinsn(regs); | |
398 | } else if (type == ETYPE_TRAP && swid == SWID_RAISE_INTERRUPT_LEVEL) { | |
399 | /* trap, used on v3 EDM target debugging workaround */ | |
400 | /* | |
401 | * DIPC(OIPC) is passed as parameter before | |
402 | * interrupt is enabled, so the DIPC will not be corrupted | |
403 | * even though interrupts are coming in | |
404 | */ | |
405 | /* | |
406 | * 1. update ipc | |
407 | * 2. update pt_regs ipc with oipc | |
408 | * 3. update pt_regs ipsw (clear DEX) | |
409 | */ | |
410 | __asm__ volatile ("mtsr %0, $IPC\n\t"::"r" (oipc)); | |
411 | regs->ipc = oipc; | |
412 | if (regs->pipsw & PSW_mskDEX) { | |
413 | pr_emerg | |
414 | ("Nested Debug exception is possibly happened\n"); | |
415 | pr_emerg("ipc:%08x pipc:%08x\n", | |
416 | (unsigned int)regs->ipc, | |
417 | (unsigned int)regs->pipc); | |
418 | } | |
419 | do_debug_trap(entry, addr, itype, regs); | |
420 | regs->ipsw &= ~PSW_mskDEX; | |
421 | } else | |
422 | unhandled_exceptions(entry, addr, type, regs); | |
423 | } |