/*
 * arch/s390/kernel/dumpstack.c
 * (mirror_ubuntu-artful-kernel tree)
 */
1 /*
2 * Stack dumping functions
3 *
4 * Copyright IBM Corp. 1999, 2013
5 */
6
7 #include <linux/kallsyms.h>
8 #include <linux/hardirq.h>
9 #include <linux/kprobes.h>
10 #include <linux/utsname.h>
11 #include <linux/export.h>
12 #include <linux/kdebug.h>
13 #include <linux/ptrace.h>
14 #include <linux/mm.h>
15 #include <linux/module.h>
16 #include <linux/sched.h>
17 #include <linux/sched/debug.h>
18 #include <asm/processor.h>
19 #include <asm/debug.h>
20 #include <asm/dis.h>
21 #include <asm/ipl.h>
22
/*
 * For dump_trace we have three different stacks to consider:
 * - the panic stack which is used if the kernel stack has overflown
 * - the asynchronous interrupt stack (cpu related)
 * - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
	     unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		/* Stop as soon as sp leaves [low, high - sizeof(*sf)]. */
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		/*
		 * gprs[8] is the saved return address of this frame
		 * (presumably r14 per the s390 ABI save area layout —
		 * NOTE(review): confirm against struct stack_frame).
		 * A non-zero return from func aborts the walk.
		 */
		if (func(data, sf->gprs[8], 0))
			return sp;
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain;
			if (!sp)
				break;
			/* Backchain must move strictly upwards and stay in bounds. */
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			if (func(data, sf->gprs[8], 1))
				return sp;
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		/* Only report kernel-mode interrupt PSW addresses. */
		if (!user_mode(regs)) {
			if (func(data, regs->psw.addr, 1))
				return sp;
		}
		/* Resume the walk on the interrupted stack via saved r15. */
		low = sp;
		sp = regs->gprs[15];
	}
}
69
/*
 * Walk a stack backtrace starting at @sp, invoking @func(@data, addr,
 * reliable) for every return address found.  The walk visits the panic
 * stack (when CONFIG_CHECK_STACK), then the async interrupt stack, then
 * the task's kernel stack, threading the returned sp from one stack into
 * the next.  @task may be NULL, meaning current.
 */
void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
		unsigned long sp)
{
	unsigned long frame_size;

	/* Each stack's usable top sits below a register save area + pt_regs. */
	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
	sp = __dump_trace(func, data, sp,
			  S390_lowcore.panic_stack + frame_size - 4096,
			  S390_lowcore.panic_stack + frame_size);
#endif
	sp = __dump_trace(func, data, sp,
			  S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
			  S390_lowcore.async_stack + frame_size);
	task = task ?: current;
	__dump_trace(func, data, sp,
		     (unsigned long)task_stack_page(task),
		     (unsigned long)task_stack_page(task) + THREAD_SIZE);
}
EXPORT_SYMBOL_GPL(dump_trace);
90
/*
 * dump_trace() callback: print one backtrace entry.  Unreliable
 * addresses are wrapped in parentheses, reliable ones are not.
 * Always returns 0 so the stack walk continues.
 */
static int show_address(void *data, unsigned long address, int reliable)
{
	if (!reliable)
		printk("([<%016lx>] %pSR)\n", address, (void *)address);
	else
		printk(" [<%016lx>] %pSR \n", address, (void *)address);
	return 0;
}
99
100 static void show_trace(struct task_struct *task, unsigned long sp)
101 {
102 if (!sp)
103 sp = task ? task->thread.ksp : current_stack_pointer();
104 printk("Call Trace:\n");
105 dump_trace(show_address, NULL, task, sp);
106 if (!task)
107 task = current;
108 debug_show_held_locks(task);
109 }
110
111 void show_stack(struct task_struct *task, unsigned long *sp)
112 {
113 unsigned long *stack;
114 int i;
115
116 stack = sp;
117 if (!stack) {
118 if (!task)
119 stack = (unsigned long *)current_stack_pointer();
120 else
121 stack = (unsigned long *)task->thread.ksp;
122 }
123 printk(KERN_DEFAULT "Stack:\n");
124 for (i = 0; i < 20; i++) {
125 if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
126 break;
127 if (i % 4 == 0)
128 printk(KERN_DEFAULT " ");
129 pr_cont("%016lx%c", *stack++, i % 4 == 3 ? '\n' : ' ');
130 }
131 show_trace(task, (unsigned long)sp);
132 }
133
134 static void show_last_breaking_event(struct pt_regs *regs)
135 {
136 printk("Last Breaking-Event-Address:\n");
137 printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
138 }
139
/*
 * Print the PSW (raw, symbolized for kernel mode, and bit-decoded) and
 * all 16 general purpose registers from @regs, followed by a disassembly
 * of the code around the PSW address.
 */
void show_registers(struct pt_regs *regs)
{
	struct psw_bits *psw = &psw_bits(regs->psw);
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
	/* Symbol resolution only makes sense for kernel addresses. */
	if (!user_mode(regs))
		pr_cont(" (%pSR)", (void *)regs->psw.addr);
	pr_cont("\n");
	/* Decode the individual PSW control bits. */
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
	       psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
	pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
	printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("      %016lx %016lx %016lx %016lx\n",
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("      %016lx %016lx %016lx %016lx\n",
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("      %016lx %016lx %016lx %016lx\n",
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}
164
/*
 * Full register dump for @regs: generic task/CPU banner, registers,
 * an optional backtrace, and the last breaking-event address.
 */
void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
		show_trace(NULL, regs->gprs[15]);
	show_last_breaking_event(regs);
}
174
/* Serializes concurrent oopses so their output does not interleave. */
static DEFINE_SPINLOCK(die_lock);

/*
 * Kernel oops entry point: print the oops banner, notify listeners,
 * dump modules and registers, then either panic (in interrupt context
 * or with panic_on_oops set) or kill the current task with SIGSEGV.
 * Does not return.
 */
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;	/* numbers successive oopses in the [#N] tag */

	oops_enter();
	lgr_info_log();
	/* Freeze the s390 debug feature so its buffers survive the dump. */
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
	       regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
	pr_cont("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	pr_cont("SMP ");
#endif
	if (debug_pagealloc_enabled())
		pr_cont("DEBUG_PAGEALLOC");
	pr_cont("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	/* Oopsing in interrupt context is unrecoverable: panic outright. */
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}