/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}
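
/*
 * Note that cmdline recording is started before the function callback
 * is registered, so pid->comm mappings are already being captured by
 * the time the first function entries hit the ring buffer.
 */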

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}
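
/*
 * The preempt_only variant above only disables preemption around the
 * ring buffer write, while function_trace_call below disables hard
 * interrupts as well. In both cases the per-cpu data->disabled counter
 * acts as a recursion guard: the event is recorded only when this CPU
 * has no other trace context active (counter == 1).
 */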

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
180
a225cdd2 181static void tracing_start_function_trace(void)
3eb36aa0
SR
182{
183 ftrace_function_enabled = 0;
184
185 if (trace_flags & TRACE_ITER_PREEMPTONLY)
186 trace_ops.func = function_trace_call_preempt_only;
187 else
188 trace_ops.func = function_trace_call;
189
190 if (func_flags.val & TRACE_FUNC_OPT_STACK)
191 register_ftrace_function(&trace_stack_ops);
192 else
193 register_ftrace_function(&trace_ops);
194
195 ftrace_function_enabled = 1;
196}
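
/*
 * ftrace_function_enabled is kept clear while the ops are selected and
 * registered, so a callback that fires mid-setup bails out early; the
 * flag is only set once registration is complete.
 */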

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}

static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		return 0;
	}

	return -EINVAL;
}
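
/*
 * func_set_flag() runs when the stack-trace option is toggled from
 * user space while the function tracer is active, e.g. (path assumes
 * debugfs is mounted at /sys/kernel/debug):
 *
 *   echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 *
 * which swaps trace_ops out for trace_stack_ops on the fly.
 */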

static struct tracer function_trace __read_mostly =
{
	.name = "function",
	.init = function_trace_init,
	.reset = function_trace_reset,
	.start = function_trace_start,
	.wait_pipe = poll_wait_pipe,
	.flags = &func_flags,
	.set_flag = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function,
#endif
};
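
/*
 * Usage sketch, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   echo function > /sys/kernel/debug/tracing/current_tracer
 *   cat /sys/kernel/debug/tracing/trace
 */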

#ifdef CONFIG_DYNAMIC_FTRACE
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}
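
/*
 * For both probes the count passed via *data controls how many times
 * the probe may fire: -1 means unlimited, any other value is counted
 * down to zero, after which the probe becomes a no-op.
 */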

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}
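
/*
 * The ->print handler renders a registered probe in the
 * set_ftrace_filter listing, producing lines such as (illustrative):
 *
 *   schedule:traceoff:count=3
 *   schedule:traceon:unlimited
 */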

static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}
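
/*
 * Usage sketch (paths assume debugfs mounted at /sys/kernel/debug):
 *
 *   echo 'schedule:traceoff:5' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * arms a probe that turns tracing off the first five times schedule()
 * is hit; a leading '!' on the glob removes the probe again:
 *
 *   echo '!schedule:traceoff' > /sys/kernel/debug/tracing/set_ftrace_filter
 */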

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
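
/*
 * If registering the second command fails, the first is unwound so the
 * two commands are only ever visible as a pair.
 */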
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
device_initcall(init_function_trace);