]>
Commit | Line | Data |
---|---|---|
21b32bbf | 1 | /* |
21b32bbf IM |
2 | * Stack trace management functions |
3 | * | |
8f47e163 | 4 | * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
21b32bbf IM |
5 | */ |
6 | #include <linux/sched.h> | |
b17b0153 | 7 | #include <linux/sched/debug.h> |
68db0cf1 | 8 | #include <linux/sched/task_stack.h> |
21b32bbf | 9 | #include <linux/stacktrace.h> |
186f4360 | 10 | #include <linux/export.h> |
02b67518 | 11 | #include <linux/uaccess.h> |
c0b766f1 | 12 | #include <asm/stacktrace.h> |
49a612c6 | 13 | #include <asm/unwind.h> |
21b32bbf | 14 | |
49a612c6 JP |
15 | static int save_stack_address(struct stack_trace *trace, unsigned long addr, |
16 | bool nosched) | |
21b32bbf | 17 | { |
018378c5 | 18 | if (nosched && in_sched_functions(addr)) |
568b329a | 19 | return 0; |
49a612c6 | 20 | |
c0b766f1 AK |
21 | if (trace->skip > 0) { |
22 | trace->skip--; | |
568b329a | 23 | return 0; |
21b32bbf | 24 | } |
21b32bbf | 25 | |
49a612c6 JP |
26 | if (trace->nr_entries >= trace->max_entries) |
27 | return -1; | |
28 | ||
29 | trace->entries[trace->nr_entries++] = addr; | |
30 | return 0; | |
018378c5 ON |
31 | } |
32 | ||
49a612c6 JP |
33 | static void __save_stack_trace(struct stack_trace *trace, |
34 | struct task_struct *task, struct pt_regs *regs, | |
35 | bool nosched) | |
9745512c | 36 | { |
49a612c6 JP |
37 | struct unwind_state state; |
38 | unsigned long addr; | |
9745512c | 39 | |
49a612c6 JP |
40 | if (regs) |
41 | save_stack_address(trace, regs->ip, nosched); | |
21b32bbf | 42 | |
49a612c6 JP |
43 | for (unwind_start(&state, task, regs, NULL); !unwind_done(&state); |
44 | unwind_next_frame(&state)) { | |
45 | addr = unwind_get_return_address(&state); | |
46 | if (!addr || save_stack_address(trace, addr, nosched)) | |
47 | break; | |
48 | } | |
49 | ||
50 | if (trace->nr_entries < trace->max_entries) | |
51 | trace->entries[trace->nr_entries++] = ULONG_MAX; | |
52 | } | |
9745512c | 53 | |
/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	/* Walk the current task's own stack; scheduler functions included. */
	__save_stack_trace(trace, current, NULL, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
9745512c | 62 | |
/*
 * Save a backtrace for the current task starting from the given register
 * state (e.g. an exception/interrupt frame) instead of the current frame.
 */
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	__save_stack_trace(trace, current, regs, false);
}
67 | ||
9745512c AV |
68 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) |
69 | { | |
1959a601 AL |
70 | if (!try_get_task_stack(tsk)) |
71 | return; | |
72 | ||
49a612c6 | 73 | __save_stack_trace(trace, tsk, NULL, true); |
1959a601 AL |
74 | |
75 | put_task_stack(tsk); | |
9745512c | 76 | } |
8594698e | 77 | EXPORT_SYMBOL_GPL(save_stack_trace_tsk); |
02b67518 | 78 | |
af085d90 JP |
79 | #ifdef CONFIG_HAVE_RELIABLE_STACKTRACE |
80 | ||
81 | #define STACKTRACE_DUMP_ONCE(task) ({ \ | |
82 | static bool __section(.data.unlikely) __dumped; \ | |
83 | \ | |
84 | if (!__dumped) { \ | |
85 | __dumped = true; \ | |
86 | WARN_ON(1); \ | |
87 | show_stack(task, NULL); \ | |
88 | } \ | |
89 | }) | |
90 | ||
/*
 * Unwind @task's kernel stack into @trace, returning -EINVAL at the
 * first sign the unwind may be unreliable: kernel-mode entry registers
 * on the stack, an unknown return address, a full entry buffer, or an
 * error reported by the unwinder.  Returns 0 only when the walk ended
 * cleanly (at the user-mode syscall pt_regs, or by running out of
 * frames — presumably the kernel-thread case; confirm against the
 * unwinder's termination semantics).
 */
static int __save_stack_trace_reliable(struct stack_trace *trace,
				       struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state);
		if (regs) {
			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */
			if (!user_mode(regs))
				return -EINVAL;

			/*
			 * The last frame contains the user mode syscall
			 * pt_regs. Skip it and finish the unwind.
			 */
			unwind_next_frame(&state);
			if (!unwind_done(&state)) {
				STACKTRACE_DUMP_ONCE(task);
				return -EINVAL;
			}
			break;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr) {
			STACKTRACE_DUMP_ONCE(task);
			return -EINVAL;
		}

		/* A full buffer also makes the trace unreliable. */
		if (save_stack_address(trace, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state)) {
		STACKTRACE_DUMP_ONCE(task);
		return -EINVAL;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;

	return 0;
}
151 | ||
152 | /* | |
153 | * This function returns an error if it detects any unreliable features of the | |
154 | * stack. Otherwise it guarantees that the stack trace is reliable. | |
155 | * | |
156 | * If the task is not 'current', the caller *must* ensure the task is inactive. | |
157 | */ | |
158 | int save_stack_trace_tsk_reliable(struct task_struct *tsk, | |
159 | struct stack_trace *trace) | |
160 | { | |
161 | int ret; | |
162 | ||
163 | if (!try_get_task_stack(tsk)) | |
164 | return -EINVAL; | |
165 | ||
166 | ret = __save_stack_trace_reliable(trace, tsk); | |
167 | ||
168 | put_task_stack(tsk); | |
169 | ||
170 | return ret; | |
171 | } | |
172 | #endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */ | |
173 | ||
/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

/*
 * In-memory layout of a frame-pointer-based user stack frame: the saved
 * frame pointer of the caller followed by the return address.
 */
struct stack_frame_user {
	const void __user *next_fp;	/* caller's saved frame pointer */
	unsigned long ret_addr;		/* return address into the caller */
};
180 | ||
c9cf4dbb FW |
181 | static int |
182 | copy_stack_frame(const void __user *fp, struct stack_frame_user *frame) | |
02b67518 TE |
183 | { |
184 | int ret; | |
185 | ||
186 | if (!access_ok(VERIFY_READ, fp, sizeof(*frame))) | |
187 | return 0; | |
188 | ||
189 | ret = 1; | |
190 | pagefault_disable(); | |
191 | if (__copy_from_user_inatomic(frame, fp, sizeof(*frame))) | |
192 | ret = 0; | |
193 | pagefault_enable(); | |
194 | ||
195 | return ret; | |
196 | } | |
197 | ||
8d7c6a96 TE |
198 | static inline void __save_stack_trace_user(struct stack_trace *trace) |
199 | { | |
200 | const struct pt_regs *regs = task_pt_regs(current); | |
201 | const void __user *fp = (const void __user *)regs->bp; | |
202 | ||
203 | if (trace->nr_entries < trace->max_entries) | |
204 | trace->entries[trace->nr_entries++] = regs->ip; | |
205 | ||
206 | while (trace->nr_entries < trace->max_entries) { | |
c9cf4dbb | 207 | struct stack_frame_user frame; |
8d7c6a96 TE |
208 | |
209 | frame.next_fp = NULL; | |
210 | frame.ret_addr = 0; | |
211 | if (!copy_stack_frame(fp, &frame)) | |
212 | break; | |
213 | if ((unsigned long)fp < regs->sp) | |
214 | break; | |
215 | if (frame.ret_addr) { | |
216 | trace->entries[trace->nr_entries++] = | |
217 | frame.ret_addr; | |
218 | } | |
219 | if (fp == frame.next_fp) | |
220 | break; | |
221 | fp = frame.next_fp; | |
222 | } | |
223 | } | |
224 | ||
02b67518 TE |
225 | void save_stack_trace_user(struct stack_trace *trace) |
226 | { | |
227 | /* | |
228 | * Trace user stack if we are not a kernel thread | |
229 | */ | |
230 | if (current->mm) { | |
8d7c6a96 | 231 | __save_stack_trace_user(trace); |
02b67518 TE |
232 | } |
233 | if (trace->nr_entries < trace->max_entries) | |
234 | trace->entries[trace->nr_entries++] = ULONG_MAX; | |
235 | } |