ftrace: add pretty print function for traceon and traceoff hooks
kernel/trace/trace_functions.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

static int function_trace_init(struct trace_array *tr)
{
        func_trace = tr;
        tr->cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace();
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace();
        tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(tr);
}

static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu, resched;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        pc = preempt_count();
        resched = ftrace_preempt_disable();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, ip, parent_ip, flags, pc);

        atomic_dec(&data->disabled);
        ftrace_preempt_enable(resched);
}
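
/*
 * Note on the pattern above (repeated in the callbacks below): the
 * per-cpu data->disabled counter doubles as a recursion guard.  Only
 * the outermost entry sees atomic_inc_return() == 1 and records an
 * event; a nested call (the tracer tracing itself) pushes the counter
 * past 1 and is silently dropped.
 */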

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *   __ftrace_trace_stack,
                 *   __trace_stack,
                 *   function_stack_trace_call
                 *   ftrace_list_func
                 *   ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
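
/*
 * The skip count of 5 passed to __trace_stack() above mirrors the
 * call chain listed in the comment; if the ftrace entry path ever
 * gains or loses a frame, that constant has to be adjusted to match,
 * or the captured stacks will start at the wrong frame.
 */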

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
        .func = function_stack_trace_call,
};

/* Our two options */
enum {
        TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};

static void tracing_start_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (trace_flags & TRACE_ITER_PREEMPTONLY)
                trace_ops.func = function_trace_call_preempt_only;
        else
                trace_ops.func = function_trace_call;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                register_ftrace_function(&trace_stack_ops);
        else
                register_ftrace_function(&trace_ops);

        ftrace_function_enabled = 1;
}
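
/*
 * Clearing ftrace_function_enabled before swapping trace_ops.func and
 * raising it only after registration keeps a callback that is already
 * installed from recording events while the function pointer is being
 * switched.
 */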

static void tracing_stop_function_trace(void)
{
        ftrace_function_enabled = 0;
        /* OK if they are not registered */
        unregister_ftrace_function(&trace_stack_ops);
        unregister_ftrace_function(&trace_ops);
}

static int func_set_flag(u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_FUNC_OPT_STACK) {
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        return 0;

                if (set) {
                        unregister_ftrace_function(&trace_ops);
                        register_ftrace_function(&trace_stack_ops);
                } else {
                        unregister_ftrace_function(&trace_stack_ops);
                        register_ftrace_function(&trace_ops);
                }

                return 0;
        }

        return -EINVAL;
}

static struct tracer function_trace __read_mostly =
{
        .name = "function",
        .init = function_trace_init,
        .reset = function_trace_reset,
        .start = function_trace_start,
        .flags = &func_flags,
        .set_flag = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_function,
#endif
};
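
/*
 * Typical usage, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   echo function > /sys/kernel/debug/tracing/current_tracer
 *   echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 *   cat /sys/kernel/debug/tracing/trace
 *
 * The second line only exists when CONFIG_STACKTRACE is enabled and
 * switches in trace_stack_ops via func_set_flag() above.
 */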

#ifdef CONFIG_DYNAMIC_FTRACE
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;

        if (tracing_is_on())
                return;

        if (!*count)
                return;

        if (*count != -1)
                (*count)--;

        tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;

        if (!tracing_is_on())
                return;

        if (!*count)
                return;

        if (*count != -1)
                (*count)--;

        tracing_off();
}
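
/*
 * In both hooks above the data slot itself stores the count: -1 means
 * trigger every time, 0 means the hook is used up, and any positive
 * value is decremented once per trigger.
 */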

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_hook_ops *ops, void *data);

static struct ftrace_hook_ops traceon_hook_ops = {
        .func = ftrace_traceon,
        .print = ftrace_trace_onoff_print,
};

static struct ftrace_hook_ops traceoff_hook_ops = {
        .func = ftrace_traceoff,
        .print = ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_hook_ops *ops, void *data)
{
        char str[KSYM_SYMBOL_LEN];
        long count = (long)data;

        kallsyms_lookup(ip, NULL, NULL, NULL, str);
        seq_printf(m, "%s:", str);

        if (ops == &traceon_hook_ops)
                seq_printf(m, "traceon");
        else
                seq_printf(m, "traceoff");

        if (count != -1)
                seq_printf(m, ":count=%ld", count);
        seq_putc(m, '\n');

        return 0;
}
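
/*
 * With the format strings above, a registered hook prints as, e.g.
 * (the function name is purely illustrative):
 *
 *   schedule:traceoff:count=4
 *
 * or, when the count is -1 (unlimited):
 *
 *   schedule:traceon
 */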

static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
        struct ftrace_hook_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = &traceon_hook_ops;
        else
                ops = &traceoff_hook_ops;

        unregister_ftrace_function_hook_func(glob, ops);

        return 0;
}

static int
ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_hook_ops *ops;
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!')
                return ftrace_trace_onoff_unreg(glob+1, cmd, param);

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = &traceon_hook_ops;
        else
                ops = &traceoff_hook_ops;

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = strict_strtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_hook(glob, ops, count);

        return ret;
}
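
/*
 * Example, assuming the usual tracing debugfs layout (the function
 * name is illustrative):
 *
 *   echo 'schedule:traceoff:5' > set_ftrace_filter
 *
 * arrives here as glob = "schedule", cmd = "traceoff", param = "5";
 * strsep() peels the count off, and the hook then turns tracing off
 * on the first 5 hits of schedule().  A leading '!', as in
 * '!schedule:traceoff', takes the unregister path instead.
 */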

static struct ftrace_func_command ftrace_traceon_cmd = {
        .name = "traceon",
        .func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name = "traceoff",
        .func = ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                unregister_ftrace_command(&ftrace_traceoff_cmd);
        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}

device_initcall(init_function_trace);