/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];
/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};
unsigned long stack_trace_max_size;
arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
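/*
 * Dump the currently recorded maximum stack trace to the console.
 * Used when check_stack() finds the end of the task's stack corrupted,
 * right before BUG()ing.
 */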
void stack_trace_print(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 stack_trace_max.nr_entries);

	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == stack_trace_max.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}
/*
 * When arch-specific code overrides this function, the following
 * data should be filled up, assuming max_stack_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
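/*
 * Default implementation: compute how deep the current stack is from
 * the address of the passed-in stack variable, and on a new maximum,
 * walk the thread stack to attribute a size to each entry returned by
 * save_stack_trace().
 */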
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags; unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_max.nr_entries = 0;
	stack_trace_max.skip = 3;

	save_stack_trace(&stack_trace_max);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_max.nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_max.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			if (*p == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	stack_trace_max.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		stack_trace_print();
		BUG();
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}
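/*
 * ftrace callback: invoked at the start of every traced function while
 * the stack tracer is enabled. The per-cpu trace_active counter keeps
 * the callback from recursing into itself through check_stack().
 */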
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
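/*
 * Registering this ftrace_ops (register_ftrace_function()) hooks
 * stack_trace_call() into every traced function. RECURSION_SAFE tells
 * ftrace that the callback handles its own recursion protection.
 */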
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
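/*
 * Handlers for the "stack_max_size" file in the tracing directory:
 * read reports the recorded maximum stack size, write lets user space
 * reset or seed it.
 */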
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);

	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}
static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
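/*
 * seq_file iterator for the "stack_trace" file: walks the saved
 * stack_dump_trace[] entries under max_stack_lock, with trace_active
 * bumped so the stack tracer callback cannot deadlock on that lock.
 */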
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}
static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}
static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}
static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_max.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == stack_trace_max.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}
static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}
static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
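/*
 * sysctl handler for /proc/sys/kernel/stack_tracer_enabled: registers
 * or unregisters the ftrace callback when the value actually changes.
 */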
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
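/*
 * Boot-time setup: "stacktrace" on the kernel command line enables the
 * tracer early, and "stacktrace_filter=<funcs>" seeds the ftrace filter
 * before the tracing files are created.
 */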
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
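/*
 * Create the stack tracer's control files in the tracing directory and
 * apply any filter or enable request that came from the command line.
 */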
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);