1 | /* | |
2 | * Based on arch/arm/kernel/traps.c | |
3 | * | |
4 | * Copyright (C) 1995-2009 Russell King | |
5 | * Copyright (C) 2012 ARM Ltd. | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License version 2 as | |
9 | * published by the Free Software Foundation. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
18 | */ | |
19 | ||
#include <linux/bug.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>
51 | ||
/*
 * Human-readable names for the four exception vector types, indexed by
 * the 'reason' code passed in from the entry assembly (see bad_mode()).
 */
static const char *handler[]= {
	"Synchronous Abort",
	"IRQ",
	"FIQ",
	"Error"
};
58 | ||
/* Non-zero: log unhandled user faults/signals (rate-limited below). */
int show_unhandled_signals = 1;
60 | ||
/*
 * Dump out the contents of some kernel memory nicely...
 *
 * Prints [bottom, top) as rows of four 64-bit words, each row prefixed
 * with the low 16 bits of its start address.  Unreadable words are
 * rendered as "????????????????".
 */
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
		     unsigned long top)
{
	unsigned long first;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	printk("%s%s(0x%016lx to 0x%016lx)\n", lvl, str, bottom, top);

	/* Rows are 32-byte aligned; the first may begin below 'bottom'. */
	for (first = bottom & ~31; first < top; first += 32) {
		unsigned long p;
		/* Four " %016lx" fields of 17 chars each, plus the NUL. */
		char str[sizeof(" 12345678") * 8 + 1];

		memset(str, ' ', sizeof(str));
		str[sizeof(str) - 1] = '\0';

		for (p = first, i = 0; i < (32 / 8)
					&& p < top; i++, p += 8) {
			/* Skip slots before 'bottom' in the first row. */
			if (p >= bottom && p < top) {
				unsigned long val;

				if (__get_user(val, (unsigned long *)p) == 0)
					sprintf(str + i * 17, " %016lx", val);
				else
					sprintf(str + i * 17, " ????????????????");
			}
		}
		printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
	}

	set_fs(fs);
}
103 | ||
/* Print one backtrace line (address plus symbol) for 'where'. */
static void dump_backtrace_entry(unsigned long where)
{
	/*
	 * Note that 'where' can have a physical address, but it's not handled.
	 */
	print_ip_sym(where);
}
111 | ||
112 | static void __dump_instr(const char *lvl, struct pt_regs *regs) | |
113 | { | |
114 | unsigned long addr = instruction_pointer(regs); | |
115 | char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; | |
116 | int i; | |
117 | ||
118 | for (i = -4; i < 1; i++) { | |
119 | unsigned int val, bad; | |
120 | ||
121 | bad = get_user(val, &((u32 *)addr)[i]); | |
122 | ||
123 | if (!bad) | |
124 | p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val); | |
125 | else { | |
126 | p += sprintf(p, "bad PC value"); | |
127 | break; | |
128 | } | |
129 | } | |
130 | printk("%sCode: %s\n", lvl, str); | |
131 | } | |
132 | ||
133 | static void dump_instr(const char *lvl, struct pt_regs *regs) | |
134 | { | |
135 | if (!user_mode(regs)) { | |
136 | mm_segment_t fs = get_fs(); | |
137 | set_fs(KERNEL_DS); | |
138 | __dump_instr(lvl, regs); | |
139 | set_fs(fs); | |
140 | } else { | |
141 | __dump_instr(lvl, regs); | |
142 | } | |
143 | } | |
144 | ||
/*
 * Unwind and print the call stack of 'tsk' (or 'current' when NULL).
 * When 'regs' is given, entries are suppressed until the frame whose fp
 * matches regs->regs[29], so the trace starts at the exception point.
 */
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	struct stackframe frame;
	int skip;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (!tsk)
		tsk = current;

	/* Pin the stack so it cannot be freed while we walk it. */
	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current) {
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.pc = (unsigned long)dump_backtrace;
	} else {
		/*
		 * task blocked in __switch_to
		 */
		frame.fp = thread_saved_fp(tsk);
		frame.pc = thread_saved_pc(tsk);
	}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = tsk->curr_ret_stack;
#endif

	skip = !!regs;
	printk("Call trace:\n");
	while (1) {
		unsigned long stack;
		int ret;

		/* skip until specified stack frame */
		if (!skip) {
			dump_backtrace_entry(frame.pc);
		} else if (frame.fp == regs->regs[29]) {
			skip = 0;
			/*
			 * Mostly, this is the case where this function is
			 * called in panic/abort. As exception handler's
			 * stack frame does not contain the corresponding pc
			 * at which an exception has taken place, use regs->pc
			 * instead.
			 */
			dump_backtrace_entry(regs->pc);
		}
		ret = unwind_frame(tsk, &frame);
		if (ret < 0)
			break;
		/* Crossing an exception boundary: dump the saved pt_regs. */
		if (in_entry_text(frame.pc)) {
			stack = frame.fp - offsetof(struct pt_regs, stackframe);

			if (on_accessible_stack(tsk, stack))
				dump_mem("", "Exception stack", stack,
					 stack + sizeof(struct pt_regs));
		}
	}

	put_task_stack(tsk);
}
206 | ||
/*
 * Arch hook for the generic show_stack(); 'sp' is unused here — the
 * trace starts from the task's saved frame pointer instead.
 */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	dump_backtrace(NULL, tsk);
	/* NOTE(review): barrier() presumably blocks a tail-call so this
	 * frame stays on the stack during the dump — confirm. */
	barrier();
}
212 | ||
/* Configuration tags appended to the oops banner in __die(). */
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#define S_SMP " SMP"
219 | ||
/*
 * Print the full oops report: banner, modules, registers, and (for
 * kernel-mode faults) backtrace and code bytes.  Runs under die_lock —
 * called only from die().  Returns the die-notifier verdict; NOTIFY_STOP
 * means a registered handler consumed the event.
 */
static int __die(const char *str, int err, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	__show_regs(regs);
	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
		 end_of_stack(tsk));

	if (!user_mode(regs)) {
		dump_backtrace(regs, tsk);
		dump_instr(KERN_EMERG, regs);
	}

	return ret;
}
247 | ||
/* Serializes oops reporting across CPUs. */
static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	int ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	/* Hand over to a loaded crash kernel, if any. */
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	/* An oops in interrupt context, or with panic_on_oops, is fatal. */
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	raw_spin_unlock_irqrestore(&die_lock, flags);

	/* Kill the task unless a die notifier claimed the event. */
	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}
283 | ||
284 | void arm64_notify_die(const char *str, struct pt_regs *regs, | |
285 | struct siginfo *info, int err) | |
286 | { | |
287 | if (user_mode(regs)) { | |
288 | current->thread.fault_address = 0; | |
289 | current->thread.fault_code = err; | |
290 | force_sig_info(info->si_signo, info, current); | |
291 | } else { | |
292 | die(str, regs, err); | |
293 | } | |
294 | } | |
295 | ||
/* Registered undefined-instruction hooks, guarded by undef_lock. */
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);
298 | ||
/* Add 'hook' to the undefined-instruction hook list. */
void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}
307 | ||
/* Remove 'hook' from the undefined-instruction hook list. */
void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}
316 | ||
/*
 * Read the faulting instruction from user memory and run the matching
 * registered hook (the last match in the list wins).  Returns the hook's
 * result, or 1 (unhandled) when the fault came from the kernel, the
 * instruction read fails, or no hook matches.
 */
static int call_undef_hook(struct pt_regs *regs)
{
	struct undef_hook *hook;
	unsigned long flags;
	u32 instr;
	int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
	void __user *pc = (void __user *)instruction_pointer(regs);

	if (!user_mode(regs))
		return 1;

	if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		__le16 instr_le;
		if (get_user(instr_le, (__le16 __user *)pc))
			goto exit;	/* fn is still NULL -> returns 1 */
		instr = le16_to_cpu(instr_le);
		if (aarch32_insn_is_wide(instr)) {
			/* 32-bit Thumb-2: first halfword in the top half. */
			u32 instr2;

			if (get_user(instr_le, (__le16 __user *)(pc + 2)))
				goto exit;
			instr2 = le16_to_cpu(instr_le);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr_le;
		if (get_user(instr_le, (__le32 __user *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	}

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
			(regs->pstate & hook->pstate_mask) == hook->pstate_val)
			fn = hook->fn;

	/* NOTE(review): fn is invoked after dropping undef_lock; hooks are
	 * presumed never unregistered concurrently — confirm. */
	raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
	return fn ? fn(regs, instr) : 1;
}
360 | ||
361 | static void force_signal_inject(int signal, int code, struct pt_regs *regs, | |
362 | unsigned long address) | |
363 | { | |
364 | siginfo_t info; | |
365 | void __user *pc = (void __user *)instruction_pointer(regs); | |
366 | const char *desc; | |
367 | ||
368 | switch (signal) { | |
369 | case SIGILL: | |
370 | desc = "undefined instruction"; | |
371 | break; | |
372 | case SIGSEGV: | |
373 | desc = "illegal memory access"; | |
374 | break; | |
375 | default: | |
376 | desc = "bad mode"; | |
377 | break; | |
378 | } | |
379 | ||
380 | if (unhandled_signal(current, signal) && | |
381 | show_unhandled_signals_ratelimited()) { | |
382 | pr_info("%s[%d]: %s: pc=%p\n", | |
383 | current->comm, task_pid_nr(current), desc, pc); | |
384 | dump_instr(KERN_INFO, regs); | |
385 | } | |
386 | ||
387 | info.si_signo = signal; | |
388 | info.si_errno = 0; | |
389 | info.si_code = code; | |
390 | info.si_addr = pc; | |
391 | ||
392 | arm64_notify_die(desc, regs, &info, 0); | |
393 | } | |
394 | ||
395 | /* | |
396 | * Set up process info to signal segmentation fault - called on access error. | |
397 | */ | |
398 | void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr) | |
399 | { | |
400 | int code; | |
401 | ||
402 | down_read(¤t->mm->mmap_sem); | |
403 | if (find_vma(current->mm, addr) == NULL) | |
404 | code = SEGV_MAPERR; | |
405 | else | |
406 | code = SEGV_ACCERR; | |
407 | up_read(¤t->mm->mmap_sem); | |
408 | ||
409 | force_signal_inject(SIGSEGV, code, regs, addr); | |
410 | } | |
411 | ||
412 | asmlinkage void __exception do_undefinstr(struct pt_regs *regs) | |
413 | { | |
414 | /* check for AArch32 breakpoint instructions */ | |
415 | if (!aarch32_break_handler(regs)) | |
416 | return; | |
417 | ||
418 | if (call_undef_hook(regs) == 0) | |
419 | return; | |
420 | ||
421 | force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); | |
422 | } | |
423 | ||
/*
 * Per-CPU enable hook: update SCTLR_EL1.UCI via config_sctlr_el1() so EL0
 * cache maintenance ops trap and are emulated by user_cache_maint_handler().
 */
int cpu_enable_cache_maint_trap(void *__unused)
{
	config_sctlr_el1(SCTLR_EL1_UCI, 0);
	return 0;
}
429 | ||
/*
 * Execute cache maintenance instruction 'insn' on user 'address', with an
 * exception-table fixup so a faulting access sets 'res' to -EFAULT instead
 * of oopsing.  Addresses outside the user range are rejected up front.
 * NOTE(review): expands to a bare if/else — callers must not follow an
 * invocation with a dangling 'else'.
 */
#define __user_cache_maint(insn, address, res)			\
	if (address >= user_addr_max()) {			\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1:	" insn ", %1\n"			\
			"	mov	%w0, #0\n"		\
			"2:\n"					\
			"	.pushsection .fixup,\"ax\"\n"	\
			"	.align	2\n"			\
			"3:	mov	%w0, %w2\n"		\
			"	b	2b\n"			\
			"	.popsection\n"			\
			_ASM_EXTABLE(1b, 3b)			\
			: "=r" (res)				\
			: "r" (address), "i" (-EFAULT));	\
		uaccess_ttbr0_disable();			\
	}
449 | ||
/*
 * Emulate an EL0 cache maintenance instruction that trapped to EL1.
 * The target address comes from the Rt register named in the ESR; on
 * success the trapped instruction is skipped, a bad user address raises
 * SIGSEGV, and an unrecognized CRm raises SIGILL.
 */
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
	unsigned long address;
	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	address = untagged_addr(pt_regs_read_reg(regs, rt));

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:	/* DC CVAP */
		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
		return;
	}

	if (ret)
		arm64_notify_segfault(regs, address);
	else
		regs->pc += 4;	/* skip the emulated instruction */
}
485 | ||
486 | static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) | |
487 | { | |
488 | int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT; | |
489 | unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0); | |
490 | ||
491 | pt_regs_write_reg(regs, rt, val); | |
492 | ||
493 | regs->pc += 4; | |
494 | } | |
495 | ||
496 | static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) | |
497 | { | |
498 | int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT; | |
499 | ||
500 | pt_regs_write_reg(regs, rt, arch_counter_get_cntvct()); | |
501 | regs->pc += 4; | |
502 | } | |
503 | ||
504 | static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) | |
505 | { | |
506 | int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT; | |
507 | ||
508 | pt_regs_write_reg(regs, rt, arch_timer_get_rate()); | |
509 | regs->pc += 4; | |
510 | } | |
511 | ||
/* One emulation rule for a trapped EL0 system instruction. */
struct sys64_hook {
	unsigned int esr_mask;	/* ESR_EL1 bits to compare */
	unsigned int esr_val;	/* expected value under esr_mask */
	void (*handler)(unsigned int esr, struct pt_regs *regs);
};
517 | ||
/* Emulation table for do_sysinstr(); terminated by the empty entry. */
static struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{},
};
544 | ||
545 | asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) | |
546 | { | |
547 | struct sys64_hook *hook; | |
548 | ||
549 | for (hook = sys64_hooks; hook->handler; hook++) | |
550 | if ((hook->esr_mask & esr) == hook->esr_val) { | |
551 | hook->handler(esr, regs); | |
552 | return; | |
553 | } | |
554 | ||
555 | /* | |
556 | * New SYS instructions may previously have been undefined at EL0. Fall | |
557 | * back to our usual undefined instruction handler so that we handle | |
558 | * these consistently. | |
559 | */ | |
560 | do_undefinstr(regs); | |
561 | } | |
562 | ||
563 | long compat_arm_syscall(struct pt_regs *regs); | |
564 | ||
/*
 * Fallback for unimplemented syscall numbers.  Compat tasks first get a
 * chance at the ARM-private syscall range; anything else is logged
 * (rate-limited) and returns the sys_ni_syscall() result.
 */
asmlinkage long do_ni_syscall(struct pt_regs *regs)
{
#ifdef CONFIG_COMPAT
	long ret;
	if (is_compat_task()) {
		ret = compat_arm_syscall(regs);
		if (ret != -ENOSYS)
			return ret;
	}
#endif

	if (show_unhandled_signals_ratelimited()) {
		pr_info("%s[%d]: syscall %d\n", current->comm,
			task_pid_nr(current), regs->syscallno);
		dump_instr("", regs);
		if (user_mode(regs))
			__show_regs(regs);
	}

	return sys_ni_syscall();
}
586 | ||
/*
 * Human-readable names for the ESR_ELx exception class (EC) field,
 * indexed by EC value; unknown classes fall back to the default entry.
 */
static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX]		= "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN]		= "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx]		= "WFI/WFE",
	[ESR_ELx_EC_CP15_32]		= "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64]		= "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR]		= "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
	[ESR_ELx_EC_SMC32]		= "SMC (AArch32)",
	[ESR_ELx_EC_SVC64]		= "SVC (AArch64)",
	[ESR_ELx_EC_HVC64]		= "HVC (AArch64)",
	[ESR_ELx_EC_SMC64]		= "SMC (AArch64)",
	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN]		= "PC Alignment",
	[ESR_ELx_EC_DABT_LOW]		= "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR]		= "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN]		= "SP Alignment",
	[ESR_ELx_EC_FP_EXC32]		= "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64]		= "FP (AArch64)",
	[ESR_ELx_EC_SERROR]		= "SError",
	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32]		= "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32]		= "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64]		= "BRK (AArch64)",
};
626 | ||
627 | const char *esr_get_class_string(u32 esr) | |
628 | { | |
629 | return esr_class_str[ESR_ELx_EC(esr)]; | |
630 | } | |
631 | ||
632 | /* | |
633 | * bad_mode handles the impossible case in the exception vector. This is always | |
634 | * fatal. | |
635 | */ | |
636 | asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) | |
637 | { | |
638 | console_verbose(); | |
639 | ||
640 | pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n", | |
641 | handler[reason], smp_processor_id(), esr, | |
642 | esr_get_class_string(esr)); | |
643 | ||
644 | die("Oops - bad mode", regs, 0); | |
645 | local_irq_disable(); | |
646 | panic("bad mode"); | |
647 | } | |
648 | ||
649 | /* | |
650 | * bad_el0_sync handles unexpected, but potentially recoverable synchronous | |
651 | * exceptions taken from EL0. Unlike bad_mode, this returns. | |
652 | */ | |
653 | asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) | |
654 | { | |
655 | siginfo_t info; | |
656 | void __user *pc = (void __user *)instruction_pointer(regs); | |
657 | console_verbose(); | |
658 | ||
659 | pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n", | |
660 | smp_processor_id(), esr, esr_get_class_string(esr)); | |
661 | __show_regs(regs); | |
662 | ||
663 | info.si_signo = SIGILL; | |
664 | info.si_errno = 0; | |
665 | info.si_code = ILL_ILLOPC; | |
666 | info.si_addr = pc; | |
667 | ||
668 | current->thread.fault_address = 0; | |
669 | current->thread.fault_code = 0; | |
670 | ||
671 | force_sig_info(info.si_signo, &info, current); | |
672 | } | |
673 | ||
#ifdef CONFIG_VMAP_STACK

/* Per-CPU emergency stack used after a kernel stack overflow. */
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

/*
 * Called from the entry code (on the overflow stack) when the kernel
 * stack has overflowed.  Reports the fault and all stack bounds, then
 * panics; never returns.
 */
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
	unsigned int esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	console_verbose();
	pr_emerg("Insufficient stack space to handle exception!");

	pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
	pr_emerg("FAR: 0x%016lx\n", far);

	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
		 irq_stk, irq_stk + THREAD_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);

	/*
	 * We use nmi_panic to limit the potential for recursive overflows, and
	 * to get a better stack trace.
	 */
	nmi_panic(NULL, "kernel stack overflow");
	cpu_park_loop();
}
#endif
710 | ||
/* Log a corrupt pte value with the reporting file and line. */
void __pte_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
}
715 | ||
/* Log a corrupt pmd value with the reporting file and line. */
void __pmd_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
}
720 | ||
/* Log a corrupt pud value with the reporting file and line. */
void __pud_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
}
725 | ||
/* Log a corrupt pgd value with the reporting file and line. */
void __pgd_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
}
730 | ||
731 | /* GENERIC_BUG traps */ | |
732 | ||
/* Always reports 'addr' as a valid BUG address; see the rationale below. */
int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() only called for BRK #BUG_BRK_IMM.
	 * So the answer is trivial -- any spurious instances with no
	 * bug table entry will be rejected by report_bug() and passed
	 * back to the debug-monitors code and handled as a fatal
	 * unexpected debug exception.
	 */
	return 1;
}
744 | ||
745 | static int bug_handler(struct pt_regs *regs, unsigned int esr) | |
746 | { | |
747 | if (user_mode(regs)) | |
748 | return DBG_HOOK_ERROR; | |
749 | ||
750 | switch (report_bug(regs->pc, regs)) { | |
751 | case BUG_TRAP_TYPE_BUG: | |
752 | die("Oops - BUG", regs, 0); | |
753 | break; | |
754 | ||
755 | case BUG_TRAP_TYPE_WARN: | |
756 | break; | |
757 | ||
758 | default: | |
759 | /* unknown/unrecognised bug trap type */ | |
760 | return DBG_HOOK_ERROR; | |
761 | } | |
762 | ||
763 | /* If thread survives, skip over the BUG instruction and continue: */ | |
764 | regs->pc += AARCH64_INSN_SIZE; /* skip BRK and resume */ | |
765 | return DBG_HOOK_HANDLED; | |
766 | } | |
767 | ||
/*
 * Match the full ESR of a BRK #BUG_BRK_IMM exactly; 0xf2000000 is the
 * BRK64 ESR template (see asm/esr.h) with the immediate in bits 15:0.
 */
static struct break_hook bug_break_hook = {
	.esr_val = 0xf2000000 | BUG_BRK_IMM,
	.esr_mask = 0xffffffff,
	.fn = bug_handler,
};
773 | ||
774 | /* | |
/*
 * Initial handler for AArch64 BRK exceptions
 * This handler only used until debug_traps_init().
 * Returns 0 when the BRK was handled, non-zero otherwise.
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs)
{
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}
783 | ||
/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
	register_break_hook(&bug_break_hook);
}