/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>

#include <asm/stacktrace.h>

int panic_on_unrecovered_nmi;
unsigned int code_bytes = 64;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static int die_counter;

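/*
 * Print a single stack entry; the "%pS" conversion resolves the address
 * to its symbol.  Entries that could not be verified against the frame
 * pointer chain get a "? " prefix (illustrative output, not from this
 * file:  " [<ffffffff8102...>] ? some_function+0x1a/0x60").
 */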
void printk_address(unsigned long address, int reliable)
{
	printk(" [<%p>] %s%pS\n", (void *) address,
			reliable ? "" : "? ", (void *) address);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
			const struct stacktrace_ops *ops,
			struct thread_info *tinfo, int *graph)
{
	struct task_struct *task = tinfo->task;
	unsigned long ret_addr;
	int index = task->curr_ret_stack;

	if (addr != (unsigned long)return_to_handler)
		return;

	if (!task->ret_stack || index < *graph)
		return;

	index -= *graph;
	ret_addr = task->ret_stack[index].ret;

	ops->address(data, ret_addr, 1);

	(*graph)++;
}
#else
static inline void
print_ftrace_graph_addr(unsigned long addr, void *data,
			const struct stacktrace_ops *ops,
			struct thread_info *tinfo, int *graph)
{ }
#endif

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

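/*
 * Check that 'p' still points into a valid stack: either the THREAD_SIZE
 * region ending at 'end' (one of the special stacks listed above), or,
 * when no 'end' is given, the current thread's own stack described by
 * 'tinfo'.
 */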
static inline int valid_stack_ptr(struct thread_info *tinfo,
			void *p, unsigned int size, void *end)
{
	void *t = tinfo;
	if (end) {
		if (p < end && p >= (end-THREAD_SIZE))
			return 1;
		else
			return 0;
	}
	return p > t && p < t + THREAD_SIZE - size;
}

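/*
 * Scan every word on the stack.  Anything that looks like a kernel text
 * address is reported; it is marked reliable (last argument of
 * ops->address() set to 1) only when it sits in the slot right above a
 * saved frame pointer, in which case we also advance along the frame
 * chain.
 */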
unsigned long
print_context_stack(struct thread_info *tinfo,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data,
		unsigned long *end, int *graph)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
		unsigned long addr;

		addr = *stack;
		if (__kernel_text_address(addr)) {
			if ((unsigned long) stack == bp + sizeof(long)) {
				ops->address(data, addr, 1);
				frame = frame->next_frame;
				bp = (unsigned long) frame;
			} else {
				ops->address(data, addr, 0);
			}
			print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
		}
		stack++;
	}
	return bp;
}
EXPORT_SYMBOL_GPL(print_context_stack);

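/*
 * Walk only the frame-pointer chain: follow frame->next_frame and report
 * each saved return address as reliable, stopping at the first address
 * that is not kernel text.
 */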
unsigned long
print_context_stack_bp(struct thread_info *tinfo,
		       unsigned long *stack, unsigned long bp,
		       const struct stacktrace_ops *ops, void *data,
		       unsigned long *end, int *graph)
{
	struct stack_frame *frame = (struct stack_frame *)bp;
	unsigned long *ret_addr = &frame->return_address;

	while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
		unsigned long addr = *ret_addr;

		if (!__kernel_text_address(addr))
			break;

		ops->address(data, addr, 1);
		frame = frame->next_frame;
		ret_addr = &frame->return_address;
		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
	}

	return (unsigned long)frame;
}
EXPORT_SYMBOL_GPL(print_context_stack_bp);

static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	printk(data);			/* log-level prefix */
	print_symbol(msg, symbol);
	printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
	printk("%s%s\n", (char *)data, msg);
}

static int print_trace_stack(void *data, char *name)
{
	printk("%s <%s> ", (char *)data, name);
	return 0;
}

/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	touch_nmi_watchdog();
	printk(data);			/* log-level prefix */
	printk_address(addr, reliable);
}

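/*
 * stacktrace_ops callbacks handed to dump_trace() by show_trace_log_lvl()
 * below; 'data' carries the log-level prefix string.
 */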
static const struct stacktrace_ops print_trace_ops = {
	.warning		= print_trace_warning,
	.warning_symbol		= print_trace_warning_symbol,
	.stack			= print_trace_stack,
	.address		= print_trace_address,
	.walk_stack		= print_context_stack,
};

void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, char *log_lvl)
{
	printk("%sCall Trace:\n", log_lvl);
	dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack)
{
	show_trace_log_lvl(task, regs, stack, "");
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	show_stack_log_lvl(task, NULL, sp, "");
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long stack;

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	show_trace(NULL, NULL, &stack);
}
EXPORT_SYMBOL(dump_stack);

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
EXPORT_SYMBOL_GPL(oops_begin);

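/*
 * Counterpart of oops_begin(): drop die_lock once the nest count reaches
 * zero, restore interrupts, and then either panic (fatal exception in
 * interrupt context, or panic_on_oops) or kill the offending task when a
 * signal number was given.
 */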
void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}

int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
#ifdef CONFIG_X86_32
	unsigned short ss;
	unsigned long sp;
#endif
	printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	sysfs_printk_last_file();
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		return 1;

	show_registers(regs);
#ifdef CONFIG_X86_32
	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
	}
	printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
	print_symbol("%s", regs->ip);
	printk(" SS:ESP %04x:%08lx\n", ss, sp);
#else
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->ip, 1);
	printk(" RSP <%016lx>\n", regs->sp);
#endif
	return 0;
}

/*
 * Called when something in the kernel has gone badly wrong and the
 * current task is about to be terminated.
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (!user_mode_vm(regs))
		report_bug(regs->ip, regs);

	if (__die(str, regs, err))
		sig = 0;
	oops_end(flags, regs, sig);
}

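/*
 * NMI-watchdog variant of die(): give the DIE_NMIWATCHDOG notifier chain
 * a chance to handle the event first; otherwise dump the registers and
 * panic when do_panic or panic_on_oops is set.
 */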
void notrace __kprobes
die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	unsigned long flags;

	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out.
	 */
	flags = oops_begin();
	printk(KERN_EMERG "%s", str);
	printk(" on CPU%d, ip %08lx, registers:\n",
		smp_processor_id(), regs->ip);
	show_registers(regs);
	oops_end(flags, regs, 0);
	if (do_panic || panic_on_oops)
		panic("Non maskable interrupt");
	nmi_exit();
	local_irq_enable();
	do_exit(SIGBUS);
}

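/*
 * Boot-time parameters for the handlers above (illustrative command-line
 * usage): "oops=panic" makes every oops panic the machine, "kstack=N"
 * sets how many stack words to print, and "code_bytes=N" (capped at 8192)
 * sets how many code bytes to dump around the faulting instruction.
 */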
static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
	if (!s)
		return -EINVAL;
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("kstack", kstack_setup);

static int __init code_bytes_setup(char *s)
{
	code_bytes = simple_strtoul(s, NULL, 0);
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);