1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2005-2017 Andes Technology Corporation
4 #include <linux/module.h>
5 #include <linux/personality.h>
6 #include <linux/kallsyms.h>
7 #include <linux/hardirq.h>
8 #include <linux/kdebug.h>
9 #include <linux/sched/task_stack.h>
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
13 #include <asm/proc-fns.h>
14 #include <asm/unistd.h>
17 #include <linux/ptrace.h>
18 #include <nds32_intrinsic.h>
20 extern void show_pte(struct mm_struct
*mm
, unsigned long addr
);
23 * Dump out the contents of some memory nicely...
25 void dump_mem(const char *lvl
, unsigned long bottom
, unsigned long top
)
32 * We need to switch to kernel mode so that we can use __get_user
33 * to safely read from kernel space. Note that we now dump the
34 * code first, just in case the backtrace kills us.
39 pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl
, bottom
, top
);
41 for (first
= bottom
& ~31; first
< top
; first
+= 32) {
43 char str
[sizeof(" 12345678") * 8 + 1];
45 memset(str
, ' ', sizeof(str
));
46 str
[sizeof(str
) - 1] = '\0';
48 for (p
= first
, i
= 0; i
< 8 && p
< top
; i
++, p
+= 4) {
49 if (p
>= bottom
&& p
< top
) {
51 if (__get_user(val
, (unsigned long *)p
) == 0)
52 sprintf(str
+ i
* 9, " %08lx", val
);
54 sprintf(str
+ i
* 9, " ????????");
57 pr_emerg("%s%04lx:%s\n", lvl
, first
& 0xffff, str
);
63 EXPORT_SYMBOL(dump_mem
);
65 static void dump_instr(struct pt_regs
*regs
)
67 unsigned long addr
= instruction_pointer(regs
);
69 char str
[sizeof("00000000 ") * 5 + 2 + 1], *p
= str
;
74 * We need to switch to kernel mode so that we can use __get_user
75 * to safely read from kernel space. Note that we now dump the
76 * code first, just in case the backtrace kills us.
82 for (i
= -4; i
< 1; i
++) {
83 unsigned int val
, bad
;
85 bad
= __get_user(val
, &((u32
*) addr
)[i
]);
88 p
+= sprintf(p
, i
== 0 ? "(%08x) " : "%08x ", val
);
90 p
+= sprintf(p
, "bad PC value");
94 pr_emerg("Code: %s\n", str
);
99 #define LOOP_TIMES (100)
100 static void __dump(struct task_struct
*tsk
, unsigned long *base_reg
,
103 unsigned long ret_addr
;
104 int cnt
= LOOP_TIMES
, graph
= 0;
105 printk("%sCall Trace:\n", loglvl
);
106 if (!IS_ENABLED(CONFIG_FRAME_POINTER
)) {
107 while (!kstack_end(base_reg
)) {
108 ret_addr
= *base_reg
++;
109 if (__kernel_text_address(ret_addr
)) {
110 ret_addr
= ftrace_graph_ret_addr(
111 tsk
, &graph
, ret_addr
, NULL
);
112 print_ip_sym(loglvl
, ret_addr
);
118 while (!kstack_end((void *)base_reg
) &&
119 !((unsigned long)base_reg
& 0x3) &&
120 ((unsigned long)base_reg
>= TASK_SIZE
)) {
121 unsigned long next_fp
;
122 ret_addr
= base_reg
[LP_OFFSET
];
123 next_fp
= base_reg
[FP_OFFSET
];
124 if (__kernel_text_address(ret_addr
)) {
126 ret_addr
= ftrace_graph_ret_addr(
127 tsk
, &graph
, ret_addr
, NULL
);
128 print_ip_sym(loglvl
, ret_addr
);
132 base_reg
= (unsigned long *)next_fp
;
135 printk("%s\n", loglvl
);
138 void show_stack_loglvl(struct task_struct
*tsk
, unsigned long *sp
,
141 unsigned long *base_reg
;
145 if (!IS_ENABLED(CONFIG_FRAME_POINTER
)) {
147 base_reg
= (unsigned long *)(tsk
->thread
.cpu_context
.sp
);
149 __asm__
__volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg
));
152 base_reg
= (unsigned long *)(tsk
->thread
.cpu_context
.fp
);
154 __asm__
__volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg
));
156 __dump(tsk
, base_reg
, loglvl
);
160 void show_stack(struct task_struct
*tsk
, unsigned long *sp
)
162 show_stack_loglvl(tsk
, sp
, KERN_EMERG
);
165 DEFINE_SPINLOCK(die_lock
);
168 * This function is protected against re-entrancy.
170 void die(const char *str
, struct pt_regs
*regs
, int err
)
172 struct task_struct
*tsk
= current
;
173 static int die_counter
;
176 spin_lock_irq(&die_lock
);
179 pr_emerg("Internal error: %s: %x [#%d]\n", str
, err
, ++die_counter
);
181 pr_emerg("CPU: %i\n", smp_processor_id());
183 pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
184 tsk
->comm
, tsk
->pid
, end_of_stack(tsk
));
186 if (!user_mode(regs
) || in_interrupt()) {
187 dump_mem("Stack: ", regs
->sp
, (regs
->sp
+ PAGE_SIZE
) & PAGE_MASK
);
193 spin_unlock_irq(&die_lock
);
/* die() only when the fault happened in kernel mode; user faults return. */
void die_if_kernel(const char *str, struct pt_regs *regs, int err)
{
	if (user_mode(regs))
		return;

	die(str, regs, err);
}
207 int bad_syscall(int n
, struct pt_regs
*regs
)
209 if (current
->personality
!= PER_LINUX
) {
210 send_sig(SIGSEGV
, current
, 1);
211 return regs
->uregs
[0];
214 force_sig_fault(SIGILL
, ILL_ILLTRP
,
215 (void __user
*)instruction_pointer(regs
) - 4);
216 die_if_kernel("Oops - bad syscall", regs
, n
);
217 return regs
->uregs
[0];
/* Report a corrupt PTE value @val, tagged with the reporting source line. */
void __pte_error(const char *file, int line, unsigned long val)
{
	pr_emerg("%s:%d: bad pte %08lx.\n", file, line, val);
}
/* Report a corrupt PMD value @val, tagged with the reporting source line. */
void __pmd_error(const char *file, int line, unsigned long val)
{
	pr_emerg("%s:%d: bad pmd %08lx.\n", file, line, val);
}
/* Report a corrupt PGD value @val, tagged with the reporting source line. */
void __pgd_error(const char *file, int line, unsigned long val)
{
	pr_emerg("%s:%d: bad pgd %08lx.\n", file, line, val);
}
235 extern char *exception_vector
, *exception_vector_end
;
236 void __init
trap_init(void)
241 void __init
early_trap_init(void)
243 unsigned long ivb
= 0;
244 unsigned long base
= PAGE_OFFSET
;
246 memcpy((unsigned long *)base
, (unsigned long *)&exception_vector
,
247 ((unsigned long)&exception_vector_end
-
248 (unsigned long)&exception_vector
));
249 ivb
= __nds32__mfsr(NDS32_SR_IVB
);
250 /* Check platform support. */
251 if (((ivb
& IVB_mskNIVIC
) >> IVB_offNIVIC
) < 2)
253 ("IVIC mode is not allowed on the platform with interrupt controller\n");
254 __nds32__mtsr((ivb
& ~IVB_mskESZ
) | (IVB_valESZ16
<< IVB_offESZ
) |
255 IVB_BASE
, NDS32_SR_IVB
);
256 __nds32__mtsr(INT_MASK_INITAIAL_VAL
, NDS32_SR_INT_MASK
);
259 * 0x800 = 128 vectors * 16byte.
260 * It should be enough to flush a page.
262 cpu_cache_wbinval_page(base
, true);
265 static void send_sigtrap(struct pt_regs
*regs
, int error_code
, int si_code
)
267 struct task_struct
*tsk
= current
;
269 tsk
->thread
.trap_no
= ENTRY_DEBUG_RELATED
;
270 tsk
->thread
.error_code
= error_code
;
272 force_sig_fault(SIGTRAP
, si_code
,
273 (void __user
*)instruction_pointer(regs
));
276 void do_debug_trap(unsigned long entry
, unsigned long addr
,
277 unsigned long type
, struct pt_regs
*regs
)
279 if (notify_die(DIE_OOPS
, "Oops", regs
, addr
, type
, SIGTRAP
)
283 if (user_mode(regs
)) {
285 send_sigtrap(regs
, 0, TRAP_BRKPT
);
288 if (!fixup_exception(regs
))
289 die("unexpected kernel_trap", regs
, 0);
293 void unhandled_interruption(struct pt_regs
*regs
)
295 pr_emerg("unhandled_interruption\n");
297 if (!user_mode(regs
))
302 void unhandled_exceptions(unsigned long entry
, unsigned long addr
,
303 unsigned long type
, struct pt_regs
*regs
)
305 pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry
,
308 if (!user_mode(regs
))
313 extern int do_page_fault(unsigned long entry
, unsigned long addr
,
314 unsigned int error_code
, struct pt_regs
*regs
);
317 * 2:DEF dispatch for TLB MISC exception handler
320 void do_dispatch_tlb_misc(unsigned long entry
, unsigned long addr
,
321 unsigned long type
, struct pt_regs
*regs
)
323 type
= type
& (ITYPE_mskINST
| ITYPE_mskETYPE
);
324 if ((type
& ITYPE_mskETYPE
) < 5) {
325 /* Permission exceptions */
326 do_page_fault(entry
, addr
, type
, regs
);
328 unhandled_exceptions(entry
, addr
, type
, regs
);
331 void do_revinsn(struct pt_regs
*regs
)
333 pr_emerg("Reserved Instruction\n");
335 if (!user_mode(regs
))
340 #ifdef CONFIG_ALIGNMENT_TRAP
341 extern int unalign_access_mode
;
342 extern int do_unaligned_access(unsigned long addr
, struct pt_regs
*regs
);
344 void do_dispatch_general(unsigned long entry
, unsigned long addr
,
345 unsigned long itype
, struct pt_regs
*regs
,
348 unsigned int swid
= itype
>> ITYPE_offSWID
;
349 unsigned long type
= itype
& (ITYPE_mskINST
| ITYPE_mskETYPE
);
350 if (type
== ETYPE_ALIGNMENT_CHECK
) {
351 #ifdef CONFIG_ALIGNMENT_TRAP
352 /* Alignment check */
353 if (user_mode(regs
) && unalign_access_mode
) {
355 ret
= do_unaligned_access(addr
, regs
);
362 ("Unhandled unaligned access exception\n");
365 do_page_fault(entry
, addr
, type
, regs
);
366 } else if (type
== ETYPE_RESERVED_INSTRUCTION
) {
367 /* Reserved instruction */
369 } else if (type
== ETYPE_COPROCESSOR
) {
371 #if IS_ENABLED(CONFIG_FPU)
372 unsigned int fucop_exist
= __nds32__mfsr(NDS32_SR_FUCOP_EXIST
);
373 unsigned int cpid
= ((itype
& ITYPE_mskCPID
) >> ITYPE_offCPID
);
375 if ((cpid
== FPU_CPID
) &&
376 (fucop_exist
& FUCOP_EXIST_mskCP0ISFPU
)) {
377 unsigned int subtype
= (itype
& ITYPE_mskSTYPE
);
379 if (true == do_fpu_exception(subtype
, regs
))
383 unhandled_exceptions(entry
, addr
, type
, regs
);
384 } else if (type
== ETYPE_TRAP
&& swid
== SWID_RAISE_INTERRUPT_LEVEL
) {
385 /* trap, used on v3 EDM target debugging workaround */
387 * DIPC(OIPC) is passed as parameter before
388 * interrupt is enabled, so the DIPC will not be corrupted
389 * even though interrupts are coming in
393 * 2. update pt_regs ipc with oipc
394 * 3. update pt_regs ipsw (clear DEX)
396 __asm__
volatile ("mtsr %0, $IPC\n\t"::"r" (oipc
));
398 if (regs
->pipsw
& PSW_mskDEX
) {
400 ("Nested Debug exception is possibly happened\n");
401 pr_emerg("ipc:%08x pipc:%08x\n",
402 (unsigned int)regs
->ipc
,
403 (unsigned int)regs
->pipc
);
405 do_debug_trap(entry
, addr
, itype
, regs
);
406 regs
->ipsw
&= ~PSW_mskDEX
;
408 unhandled_exceptions(entry
, addr
, type
, regs
);