kernel/trace/trace_stack.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

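/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far, terminated by a ULONG_MAX sentinel. stack_trace_index[]
 * records, for each entry, the number of bytes between that frame and
 * the top of the stack, so adjacent entries can be subtracted to get
 * per-function frame sizes.
 */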
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed-in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries = STACK_TRACE_ENTRIES - 1,
	.entries = &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

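/*
 * Dump the currently recorded max stack via printk; check_stack()
 * calls this just before BUG() when it finds the end of the task
 * stack corrupted.
 */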
void stack_trace_print(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 stack_trace_max.nr_entries);

	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == stack_trace_max.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}

/*
 * When arch-specific code overrides this function, the following
 * fields should be filled in, assuming stack_trace_max_lock is held
 * to prevent concurrent updates:
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = READ_ONCE(tracer_frame);
	int i, x;

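	/*
	 * The stack grows down, so the pointer masked with
	 * (THREAD_SIZE - 1) gives the offset into the thread stack;
	 * subtracting that from THREAD_SIZE yields the number of
	 * bytes currently in use.
	 */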
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_max.nr_entries = 0;
	stack_trace_max.skip = 3;

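	/*
	 * Capture the trace; .skip drops the innermost entries, which
	 * belong to the stack tracer machinery itself.
	 */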
	save_stack_trace(&stack_trace_max);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed-in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_max.nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. Some entries may for whatever
	 * reason be missing from the stack, so we have to account for
	 * that: if every entry is present, this loop runs only once.
	 * This code only executes on a new max, so it is far from a
	 * fast path.
	 */
	while (i < stack_trace_max.nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_max.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	stack_trace_max.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		stack_trace_print();
		BUG();
	}

 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

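/*
 * The ftrace callback: invoked at (almost) every traced function
 * entry. The per-CPU disable_stack_tracer counter guards against
 * recursion in case anything called from check_stack() is itself
 * traced.
 */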
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* no atomics needed; this variable is only modified by this CPU */
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	/* If RCU is not watching, saving the stack trace can fail */
	if (!rcu_is_watching())
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

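/*
 * FTRACE_OPS_FL_RECURSION_SAFE tells ftrace that the callback above
 * performs its own recursion protection (disable_stack_tracer), so
 * no extra guard needs to be layered on top.
 */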
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * If we traced inside arch_spin_lock() or in an NMI right
	 * after taking it, we would deadlock on the lock we already
	 * hold, so the per-CPU disable_stack_tracer must be
	 * incremented here as well.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open = tracing_open_generic,
	.read = stack_max_size_read,
	.write = stack_max_size_write,
	.llseek = default_llseek,
};

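/*
 * seq_file iterator for the stack_trace file. The current index is
 * stashed in m->private; t_start() takes stack_trace_max_lock (with
 * IRQs off and the tracer disabled on this CPU) so the recorded
 * snapshot cannot change while it is being walked.
 */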
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_max.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == stack_trace_max.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open = stack_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

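/*
 * With dynamic ftrace, a stack_trace_filter file is also exposed so
 * the set of functions that trigger the stack check can be limited,
 * reusing the generic ftrace filter machinery.
 */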
#ifdef CONFIG_DYNAMIC_FTRACE

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */

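/*
 * sysctl handler for /proc/sys/kernel/stack_tracer_enabled: register
 * or unregister the ftrace callback only when the value actually
 * changes.
 */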
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

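/*
 * __setup("stacktrace", ...) matches any parameter that starts with
 * "stacktrace", so booting with "stacktrace_filter=<funcs>" arrives
 * here with str == "_filter=<funcs>" and seeds the filter buffer.
 */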
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

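/*
 * Create the tracing control files at boot and honour any
 * "stacktrace" / "stacktrace_filter" command-line options recorded
 * by enable_stacktrace() above.
 */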
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			  &stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			  NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", 0444, d_tracer,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);