/*
 * kernel/trace/trace_stack.c
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
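
/*
 * The stack tracer: every time a function is entered through its
 * ftrace hook, measure how deep the kernel stack is at that point
 * and, on a new maximum, record the backtrace together with the
 * stack size each frame accounts for.  The results are exported in
 * the tracing directory as stack_trace and stack_max_size (plus
 * stack_trace_filter with CONFIG_DYNAMIC_FTRACE), and the tracer is
 * switched on either by the "stacktrace" kernel command line option
 * or via /proc/sys/kernel/stack_tracer_enabled.
 */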
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

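/*
 * stack_dump_trace[i] holds the i-th return address of the deepest
 * stack seen so far; stack_trace_index[i] holds that entry's depth,
 * i.e. the number of bytes between its frame and the top of the
 * stack.  Unused slots stay at ULONG_MAX, which all readers below
 * treat as an end marker; the extra element in stack_dump_trace
 * guarantees a terminating sentinel even when the array fills up.
 */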
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	{ [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries = STACK_TRACE_ENTRIES - 1,
	.entries = &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

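/*
 * Dump the saved max stack to the console.  "Depth" is how far a
 * frame sits from the top of the stack and "Size" is the portion the
 * frame itself consumes (the difference to the next entry's depth).
 * Output with hypothetical numbers:
 *
 *         Depth    Size   Location    (2 entries)
 *         -----    ----   --------
 *   0)      496      64   __schedule+0x2c/0x58
 *   1)      432     432   schedule+0x14/0x20
 */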
void stack_trace_print(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 stack_trace_max.nr_entries);

	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == stack_trace_max.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}

/*
 * When arch-specific code overrides this function, the following
 * data should be filled up, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = READ_ONCE(tracer_frame);
	int i, x;

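	/*
	 * How much stack is in use right now?  The kernel stack is a
	 * THREAD_SIZE-aligned block that grows down, so the low bits
	 * of any on-stack address give its offset from the bottom of
	 * the block.  Illustrative numbers: with THREAD_SIZE = 16K
	 * and &stack at block offset 0x3f00, usage is
	 * 0x4000 - 0x3f00 = 256 bytes.
	 */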
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_max.nr_entries = 0;
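	/*
	 * Skip the frames that got us here; roughly this function,
	 * the stack_trace_call() callback and the ftrace trampoline.
	 * The count is a heuristic, and the loop below additionally
	 * searches for the passed in ip in case it is off.
	 */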
	stack_trace_max.skip = 3;

	save_stack_trace(&stack_trace_max);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_max.nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_max.nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_max.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	stack_trace_max.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		stack_trace_print();
		BUG();
	}

 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

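/*
 * The ftrace callback.  ip is the address of the mcount/fentry call
 * site in the traced function; advancing it by MCOUNT_INSN_SIZE makes
 * the reported address point past that call.  The per-cpu
 * disable_stack_tracer count doubles as a recursion guard: if
 * anything called from here is itself traced, the nested invocation
 * sees a count above one and backs out.
 */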
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* no atomic needed, we only modify this variable by this cpu */
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

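/*
 * stack_max_size is a plain numeric file: reads return the largest
 * stack usage recorded so far, writes replace it under the same lock
 * that check_stack() takes.  Writing 0, for instance, re-arms the
 * tracer to capture the next maximum from scratch.
 */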
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu disable_stack_tracer here.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open = tracing_open_generic,
	.read = stack_max_size_read,
	.write = stack_max_size_write,
	.llseek = default_llseek,
};

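/*
 * seq_file iterator for the stack_trace file.  t_start() disables
 * the stack tracer on this CPU and takes stack_trace_max_lock so the
 * snapshot cannot change while it is being read, then hands out
 * SEQ_START_TOKEN for the header line; __next() maps position n to
 * entry n - 1 and stops at the ULONG_MAX sentinel; t_stop() undoes
 * the locking in reverse order.
 */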
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_max.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == stack_trace_max.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open = stack_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_DYNAMIC_FTRACE

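/*
 * stack_trace_filter reuses ftrace's filter engine, so it accepts
 * the same glob syntax as set_ftrace_filter.  For example (run from
 * the tracing directory):
 *
 *	echo 'vfs_*' > stack_trace_filter
 *
 * limits the stack checks to functions matching the pattern.
 */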
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */

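/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled.
 * proc_dointvec() updates stack_tracer_enabled; on a real 0 <-> 1
 * transition the ftrace callback is registered or unregistered to
 * match.
 */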
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

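/*
 * Boot-time setup.  __setup("stacktrace") matches by prefix, so both
 * "stacktrace" and "stacktrace_filter=<globs>" end up here; in the
 * latter case str points at "_filter=..." and the filter string is
 * stashed until stack_trace_init() can pass it to ftrace.
 */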
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			  &stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			  NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", 0444, d_tracer,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);