]>
Commit | Line | Data |
---|---|---|
1b29b018 SR |
1 | /* |
2 | * ring buffer based function tracer | |
3 | * | |
4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | |
5 | * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> | |
6 | * | |
7 | * Based on code from the latency_tracer, that is: | |
8 | * | |
9 | * Copyright (C) 2004-2006 Ingo Molnar | |
6d49e352 | 10 | * Copyright (C) 2004 Nadia Yvette Chambers |
1b29b018 | 11 | */ |
23b4ff3a | 12 | #include <linux/ring_buffer.h> |
1b29b018 SR |
13 | #include <linux/debugfs.h> |
14 | #include <linux/uaccess.h> | |
15 | #include <linux/ftrace.h> | |
f20a5806 | 16 | #include <linux/slab.h> |
2e0f5761 | 17 | #include <linux/fs.h> |
1b29b018 SR |
18 | |
19 | #include "trace.h" | |
20 | ||
f20a5806 SRRH |
21 | static void tracing_start_function_trace(struct trace_array *tr); |
22 | static void tracing_stop_function_trace(struct trace_array *tr); | |
23 | static void | |
24 | function_trace_call(unsigned long ip, unsigned long parent_ip, | |
25 | struct ftrace_ops *op, struct pt_regs *pt_regs); | |
26 | static void | |
27 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip, | |
28 | struct ftrace_ops *op, struct pt_regs *pt_regs); | |
f20a5806 SRRH |
29 | static struct tracer_flags func_flags; |
30 | ||
31 | /* Our option */ | |
32 | enum { | |
33 | TRACE_FUNC_OPT_STACK = 0x1, | |
34 | }; | |
35 | ||
36 | static int allocate_ftrace_ops(struct trace_array *tr) | |
37 | { | |
38 | struct ftrace_ops *ops; | |
a225cdd2 | 39 | |
f20a5806 SRRH |
40 | ops = kzalloc(sizeof(*ops), GFP_KERNEL); |
41 | if (!ops) | |
42 | return -ENOMEM; | |
53614991 | 43 | |
f20a5806 SRRH |
44 | /* Currently only the non stack verision is supported */ |
45 | ops->func = function_trace_call; | |
345ddcc8 | 46 | ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID; |
f20a5806 SRRH |
47 | |
48 | tr->ops = ops; | |
49 | ops->private = tr; | |
50 | return 0; | |
51 | } | |
a225cdd2 | 52 | |
591dffda SRRH |
53 | |
54 | int ftrace_create_function_files(struct trace_array *tr, | |
55 | struct dentry *parent) | |
56 | { | |
57 | int ret; | |
58 | ||
5d6c97c5 SRRH |
59 | /* |
60 | * The top level array uses the "global_ops", and the files are | |
61 | * created on boot up. | |
62 | */ | |
63 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) | |
64 | return 0; | |
65 | ||
66 | ret = allocate_ftrace_ops(tr); | |
67 | if (ret) | |
68 | return ret; | |
591dffda SRRH |
69 | |
70 | ftrace_create_filter_files(tr->ops, parent); | |
71 | ||
72 | return 0; | |
73 | } | |
74 | ||
75 | void ftrace_destroy_function_files(struct trace_array *tr) | |
76 | { | |
77 | ftrace_destroy_filter_files(tr->ops); | |
78 | kfree(tr->ops); | |
79 | tr->ops = NULL; | |
80 | } | |
81 | ||
b6f11df2 | 82 | static int function_trace_init(struct trace_array *tr) |
1b29b018 | 83 | { |
4104d326 | 84 | ftrace_func_t func; |
f20a5806 | 85 | |
4104d326 SRRH |
86 | /* |
87 | * Instance trace_arrays get their ops allocated | |
88 | * at instance creation. Unless it failed | |
89 | * the allocation. | |
90 | */ | |
91 | if (!tr->ops) | |
591dffda | 92 | return -ENOMEM; |
4104d326 SRRH |
93 | |
94 | /* Currently only the global instance can do stack tracing */ | |
95 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL && | |
96 | func_flags.val & TRACE_FUNC_OPT_STACK) | |
97 | func = function_stack_trace_call; | |
98 | else | |
99 | func = function_trace_call; | |
100 | ||
101 | ftrace_init_array_ops(tr, func); | |
f20a5806 | 102 | |
12883efb | 103 | tr->trace_buffer.cpu = get_cpu(); |
26bc83f4 SR |
104 | put_cpu(); |
105 | ||
41bc8144 | 106 | tracing_start_cmdline_record(); |
f20a5806 | 107 | tracing_start_function_trace(tr); |
1c80025a | 108 | return 0; |
1b29b018 SR |
109 | } |
110 | ||
/* Tracer ->reset callback: undo everything function_trace_init() armed. */
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}
117 | ||
9036990d SR |
118 | static void function_trace_start(struct trace_array *tr) |
119 | { | |
12883efb | 120 | tracing_reset_online_cpus(&tr->trace_buffer); |
9036990d SR |
121 | } |
122 | ||
bb3c3c95 | 123 | static void |
2f5f6ad9 | 124 | function_trace_call(unsigned long ip, unsigned long parent_ip, |
a1e2e31d | 125 | struct ftrace_ops *op, struct pt_regs *pt_regs) |
bb3c3c95 | 126 | { |
f20a5806 | 127 | struct trace_array *tr = op->private; |
bb3c3c95 SR |
128 | struct trace_array_cpu *data; |
129 | unsigned long flags; | |
d41032a8 | 130 | int bit; |
bb3c3c95 SR |
131 | int cpu; |
132 | int pc; | |
133 | ||
f20a5806 | 134 | if (unlikely(!tr->function_enabled)) |
bb3c3c95 SR |
135 | return; |
136 | ||
897f68a4 SR |
137 | pc = preempt_count(); |
138 | preempt_disable_notrace(); | |
bb3c3c95 | 139 | |
897f68a4 SR |
140 | bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX); |
141 | if (bit < 0) | |
142 | goto out; | |
143 | ||
144 | cpu = smp_processor_id(); | |
12883efb | 145 | data = per_cpu_ptr(tr->trace_buffer.data, cpu); |
897f68a4 SR |
146 | if (!atomic_read(&data->disabled)) { |
147 | local_save_flags(flags); | |
7be42151 | 148 | trace_function(tr, ip, parent_ip, flags, pc); |
bb3c3c95 | 149 | } |
897f68a4 | 150 | trace_clear_recursion(bit); |
bb3c3c95 | 151 | |
897f68a4 SR |
152 | out: |
153 | preempt_enable_notrace(); | |
bb3c3c95 SR |
154 | } |
155 | ||
53614991 | 156 | static void |
2f5f6ad9 | 157 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip, |
a1e2e31d | 158 | struct ftrace_ops *op, struct pt_regs *pt_regs) |
53614991 | 159 | { |
f20a5806 | 160 | struct trace_array *tr = op->private; |
53614991 SR |
161 | struct trace_array_cpu *data; |
162 | unsigned long flags; | |
163 | long disabled; | |
164 | int cpu; | |
165 | int pc; | |
166 | ||
f20a5806 | 167 | if (unlikely(!tr->function_enabled)) |
53614991 SR |
168 | return; |
169 | ||
170 | /* | |
171 | * Need to use raw, since this must be called before the | |
172 | * recursive protection is performed. | |
173 | */ | |
174 | local_irq_save(flags); | |
175 | cpu = raw_smp_processor_id(); | |
12883efb | 176 | data = per_cpu_ptr(tr->trace_buffer.data, cpu); |
53614991 SR |
177 | disabled = atomic_inc_return(&data->disabled); |
178 | ||
179 | if (likely(disabled == 1)) { | |
180 | pc = preempt_count(); | |
7be42151 | 181 | trace_function(tr, ip, parent_ip, flags, pc); |
53614991 SR |
182 | /* |
183 | * skip over 5 funcs: | |
184 | * __ftrace_trace_stack, | |
185 | * __trace_stack, | |
186 | * function_stack_trace_call | |
187 | * ftrace_list_func | |
188 | * ftrace_call | |
189 | */ | |
7be42151 | 190 | __trace_stack(tr, flags, 5, pc); |
53614991 SR |
191 | } |
192 | ||
193 | atomic_dec(&data->disabled); | |
194 | local_irq_restore(flags); | |
195 | } | |
196 | ||
53614991 SR |
197 | static struct tracer_opt func_opts[] = { |
198 | #ifdef CONFIG_STACKTRACE | |
199 | { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) }, | |
200 | #endif | |
201 | { } /* Always set a last empty entry */ | |
202 | }; | |
203 | ||
204 | static struct tracer_flags func_flags = { | |
205 | .val = 0, /* By default: all flags disabled */ | |
206 | .opts = func_opts | |
207 | }; | |
208 | ||
f20a5806 | 209 | static void tracing_start_function_trace(struct trace_array *tr) |
3eb36aa0 | 210 | { |
f20a5806 SRRH |
211 | tr->function_enabled = 0; |
212 | register_ftrace_function(tr->ops); | |
213 | tr->function_enabled = 1; | |
3eb36aa0 SR |
214 | } |
215 | ||
f20a5806 | 216 | static void tracing_stop_function_trace(struct trace_array *tr) |
3eb36aa0 | 217 | { |
f20a5806 SRRH |
218 | tr->function_enabled = 0; |
219 | unregister_ftrace_function(tr->ops); | |
3eb36aa0 SR |
220 | } |
221 | ||
d39cdd20 CH |
222 | static struct tracer function_trace; |
223 | ||
8c1a49ae SRRH |
224 | static int |
225 | func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | |
53614991 | 226 | { |
f555f123 AV |
227 | switch (bit) { |
228 | case TRACE_FUNC_OPT_STACK: | |
53614991 SR |
229 | /* do nothing if already set */ |
230 | if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK)) | |
f555f123 | 231 | break; |
53614991 | 232 | |
d39cdd20 CH |
233 | /* We can change this flag when not running. */ |
234 | if (tr->current_trace != &function_trace) | |
235 | break; | |
236 | ||
f20a5806 SRRH |
237 | unregister_ftrace_function(tr->ops); |
238 | ||
3eb36aa0 | 239 | if (set) { |
4104d326 | 240 | tr->ops->func = function_stack_trace_call; |
f20a5806 | 241 | register_ftrace_function(tr->ops); |
3eb36aa0 | 242 | } else { |
4104d326 | 243 | tr->ops->func = function_trace_call; |
f20a5806 | 244 | register_ftrace_function(tr->ops); |
3eb36aa0 | 245 | } |
53614991 | 246 | |
f555f123 AV |
247 | break; |
248 | default: | |
249 | return -EINVAL; | |
53614991 SR |
250 | } |
251 | ||
f555f123 | 252 | return 0; |
53614991 SR |
253 | } |
254 | ||
8f768993 | 255 | static struct tracer function_trace __tracer_data = |
1b29b018 | 256 | { |
3eb36aa0 SR |
257 | .name = "function", |
258 | .init = function_trace_init, | |
259 | .reset = function_trace_reset, | |
260 | .start = function_trace_start, | |
53614991 SR |
261 | .flags = &func_flags, |
262 | .set_flag = func_set_flag, | |
f20a5806 | 263 | .allow_instances = true, |
60a11774 | 264 | #ifdef CONFIG_FTRACE_SELFTEST |
3eb36aa0 | 265 | .selftest = trace_selftest_startup_function, |
60a11774 | 266 | #endif |
1b29b018 SR |
267 | }; |
268 | ||
23b4ff3a | 269 | #ifdef CONFIG_DYNAMIC_FTRACE |
fe014e24 | 270 | static void update_traceon_count(struct ftrace_probe_ops *ops, |
6e444319 SRV |
271 | unsigned long ip, bool on, |
272 | void *data) | |
23b4ff3a | 273 | { |
6e444319 | 274 | struct ftrace_func_mapper *mapper = data; |
fe014e24 SRV |
275 | long *count; |
276 | long old_count; | |
23b4ff3a | 277 | |
a9ce7c36 SRRH |
278 | /* |
279 | * Tracing gets disabled (or enabled) once per count. | |
0af26492 | 280 | * This function can be called at the same time on multiple CPUs. |
a9ce7c36 SRRH |
281 | * It is fine if both disable (or enable) tracing, as disabling |
282 | * (or enabling) the second time doesn't do anything as the | |
283 | * state of the tracer is already disabled (or enabled). | |
284 | * What needs to be synchronized in this case is that the count | |
285 | * only gets decremented once, even if the tracer is disabled | |
286 | * (or enabled) twice, as the second one is really a nop. | |
287 | * | |
288 | * The memory barriers guarantee that we only decrement the | |
289 | * counter once. First the count is read to a local variable | |
290 | * and a read barrier is used to make sure that it is loaded | |
291 | * before checking if the tracer is in the state we want. | |
292 | * If the tracer is not in the state we want, then the count | |
293 | * is guaranteed to be the old count. | |
294 | * | |
295 | * Next the tracer is set to the state we want (disabled or enabled) | |
296 | * then a write memory barrier is used to make sure that | |
297 | * the new state is visible before changing the counter by | |
298 | * one minus the old counter. This guarantees that another CPU | |
299 | * executing this code will see the new state before seeing | |
0af26492 | 300 | * the new counter value, and would not do anything if the new |
a9ce7c36 SRRH |
301 | * counter is seen. |
302 | * | |
303 | * Note, there is no synchronization between this and a user | |
304 | * setting the tracing_on file. But we currently don't care | |
305 | * about that. | |
306 | */ | |
fe014e24 SRV |
307 | count = (long *)ftrace_func_mapper_find_ip(mapper, ip); |
308 | old_count = *count; | |
309 | ||
310 | if (old_count <= 0) | |
a9ce7c36 | 311 | return; |
23b4ff3a | 312 | |
a9ce7c36 SRRH |
313 | /* Make sure we see count before checking tracing state */ |
314 | smp_rmb(); | |
23b4ff3a | 315 | |
a9ce7c36 SRRH |
316 | if (on == !!tracing_is_on()) |
317 | return; | |
318 | ||
319 | if (on) | |
320 | tracing_on(); | |
321 | else | |
322 | tracing_off(); | |
323 | ||
a9ce7c36 SRRH |
324 | /* Make sure tracing state is visible before updating count */ |
325 | smp_wmb(); | |
326 | ||
327 | *count = old_count - 1; | |
23b4ff3a SR |
328 | } |
329 | ||
/* Probe func for "traceon:count": enable tracing, bounded by the counter. */
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, 1, data);
}
23b4ff3a | 337 | |
/* Probe func for "traceoff:count": disable tracing, bounded by the counter. */
static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, 0, data);
}
345 | ||
/* Probe func for unbounded "traceon": turn tracing on if it is off. */
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}
356 | ||
/* Probe func for unbounded "traceoff": turn tracing off if it is on. */
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}
367 | ||
dd42cd3e SRRH |
/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

/* Probe func for unbounded "stacktrace": dump the current stack. */
static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_dump_stack(STACK_SKIP);
}
384 | ||
385 | static void | |
bca6c8d0 | 386 | ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, |
b5f081b5 | 387 | struct trace_array *tr, struct ftrace_probe_ops *ops, |
6e444319 | 388 | void *data) |
dd42cd3e | 389 | { |
6e444319 | 390 | struct ftrace_func_mapper *mapper = data; |
fe014e24 | 391 | long *count; |
a9ce7c36 SRRH |
392 | long old_count; |
393 | long new_count; | |
394 | ||
fe014e24 SRV |
395 | if (!tracing_is_on()) |
396 | return; | |
397 | ||
398 | /* unlimited? */ | |
399 | if (!mapper) { | |
400 | trace_dump_stack(STACK_SKIP); | |
401 | return; | |
402 | } | |
403 | ||
404 | count = (long *)ftrace_func_mapper_find_ip(mapper, ip); | |
405 | ||
a9ce7c36 SRRH |
406 | /* |
407 | * Stack traces should only execute the number of times the | |
408 | * user specified in the counter. | |
409 | */ | |
410 | do { | |
a9ce7c36 SRRH |
411 | old_count = *count; |
412 | ||
413 | if (!old_count) | |
414 | return; | |
415 | ||
a9ce7c36 SRRH |
416 | new_count = old_count - 1; |
417 | new_count = cmpxchg(count, old_count, new_count); | |
418 | if (new_count == old_count) | |
419 | trace_dump_stack(STACK_SKIP); | |
420 | ||
fe014e24 SRV |
421 | if (!tracing_is_on()) |
422 | return; | |
423 | ||
a9ce7c36 SRRH |
424 | } while (new_count != old_count); |
425 | } | |
426 | ||
6e444319 SRV |
427 | static int update_count(struct ftrace_probe_ops *ops, unsigned long ip, |
428 | void *data) | |
a9ce7c36 | 429 | { |
6e444319 | 430 | struct ftrace_func_mapper *mapper = data; |
fe014e24 | 431 | long *count = NULL; |
a9ce7c36 | 432 | |
fe014e24 SRV |
433 | if (mapper) |
434 | count = (long *)ftrace_func_mapper_find_ip(mapper, ip); | |
a9ce7c36 | 435 | |
fe014e24 SRV |
436 | if (count) { |
437 | if (*count <= 0) | |
438 | return 0; | |
a9ce7c36 | 439 | (*count)--; |
fe014e24 | 440 | } |
a9ce7c36 SRRH |
441 | |
442 | return 1; | |
dd42cd3e SRRH |
443 | } |
444 | ||
ad71d889 | 445 | static void |
bca6c8d0 | 446 | ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, |
b5f081b5 | 447 | struct trace_array *tr, struct ftrace_probe_ops *ops, |
6e444319 | 448 | void *data) |
ad71d889 | 449 | { |
6e444319 | 450 | if (update_count(ops, ip, data)) |
ad71d889 SRRH |
451 | ftrace_dump(DUMP_ALL); |
452 | } | |
453 | ||
90e3c03c SRRH |
454 | /* Only dump the current CPU buffer. */ |
455 | static void | |
bca6c8d0 | 456 | ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, |
b5f081b5 | 457 | struct trace_array *tr, struct ftrace_probe_ops *ops, |
6e444319 | 458 | void *data) |
90e3c03c | 459 | { |
6e444319 | 460 | if (update_count(ops, ip, data)) |
90e3c03c SRRH |
461 | ftrace_dump(DUMP_ORIG); |
462 | } | |
463 | ||
e110e3d1 | 464 | static int |
dd42cd3e | 465 | ftrace_probe_print(const char *name, struct seq_file *m, |
6e444319 SRV |
466 | unsigned long ip, struct ftrace_probe_ops *ops, |
467 | void *data) | |
dd42cd3e | 468 | { |
6e444319 | 469 | struct ftrace_func_mapper *mapper = data; |
fe014e24 | 470 | long *count = NULL; |
dd42cd3e SRRH |
471 | |
472 | seq_printf(m, "%ps:%s", (void *)ip, name); | |
473 | ||
fe014e24 SRV |
474 | if (mapper) |
475 | count = (long *)ftrace_func_mapper_find_ip(mapper, ip); | |
476 | ||
477 | if (count) | |
478 | seq_printf(m, ":count=%ld\n", *count); | |
dd42cd3e | 479 | else |
fe014e24 | 480 | seq_puts(m, ":unlimited\n"); |
dd42cd3e SRRH |
481 | |
482 | return 0; | |
483 | } | |
484 | ||
/* ->print callback for the traceon probes. */
static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}
492 | ||
/* ->print callback for the traceoff probes. */
static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}
499 | ||
/* ->print callback for the stacktrace probes. */
static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}
e110e3d1 | 506 | |
ad71d889 SRRH |
/* ->print callback for the dump probe. */
static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}
513 | ||
90e3c03c SRRH |
/* ->print callback for the cpudump probe. */
static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
520 | ||
521 | ||
522 | static int | |
b5f081b5 | 523 | ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr, |
6e444319 | 524 | unsigned long ip, void *init_data, void **data) |
fe014e24 | 525 | { |
6e444319 SRV |
526 | struct ftrace_func_mapper *mapper = *data; |
527 | ||
528 | if (!mapper) { | |
529 | mapper = allocate_ftrace_func_mapper(); | |
530 | if (!mapper) | |
531 | return -ENOMEM; | |
532 | *data = mapper; | |
533 | } | |
fe014e24 | 534 | |
6e444319 | 535 | return ftrace_func_mapper_add_ip(mapper, ip, init_data); |
fe014e24 SRV |
536 | } |
537 | ||
538 | static void | |
b5f081b5 | 539 | ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr, |
6e444319 | 540 | unsigned long ip, void *data) |
fe014e24 | 541 | { |
6e444319 SRV |
542 | struct ftrace_func_mapper *mapper = data; |
543 | ||
544 | if (!ip) { | |
545 | free_ftrace_func_mapper(mapper, NULL); | |
546 | return; | |
547 | } | |
fe014e24 SRV |
548 | |
549 | ftrace_func_mapper_remove_ip(mapper, ip); | |
90e3c03c SRRH |
550 | } |
551 | ||
8380d248 SRRH |
552 | static struct ftrace_probe_ops traceon_count_probe_ops = { |
553 | .func = ftrace_traceon_count, | |
dd42cd3e | 554 | .print = ftrace_traceon_print, |
fe014e24 SRV |
555 | .init = ftrace_count_init, |
556 | .free = ftrace_count_free, | |
8380d248 SRRH |
557 | }; |
558 | ||
559 | static struct ftrace_probe_ops traceoff_count_probe_ops = { | |
560 | .func = ftrace_traceoff_count, | |
dd42cd3e | 561 | .print = ftrace_traceoff_print, |
fe014e24 SRV |
562 | .init = ftrace_count_init, |
563 | .free = ftrace_count_free, | |
dd42cd3e SRRH |
564 | }; |
565 | ||
566 | static struct ftrace_probe_ops stacktrace_count_probe_ops = { | |
567 | .func = ftrace_stacktrace_count, | |
568 | .print = ftrace_stacktrace_print, | |
fe014e24 SRV |
569 | .init = ftrace_count_init, |
570 | .free = ftrace_count_free, | |
8380d248 SRRH |
571 | }; |
572 | ||
ad71d889 SRRH |
573 | static struct ftrace_probe_ops dump_probe_ops = { |
574 | .func = ftrace_dump_probe, | |
575 | .print = ftrace_dump_print, | |
fe014e24 SRV |
576 | .init = ftrace_count_init, |
577 | .free = ftrace_count_free, | |
ad71d889 SRRH |
578 | }; |
579 | ||
90e3c03c SRRH |
580 | static struct ftrace_probe_ops cpudump_probe_ops = { |
581 | .func = ftrace_cpudump_probe, | |
582 | .print = ftrace_cpudump_print, | |
583 | }; | |
584 | ||
b6887d79 | 585 | static struct ftrace_probe_ops traceon_probe_ops = { |
23b4ff3a | 586 | .func = ftrace_traceon, |
dd42cd3e | 587 | .print = ftrace_traceon_print, |
23b4ff3a SR |
588 | }; |
589 | ||
b6887d79 | 590 | static struct ftrace_probe_ops traceoff_probe_ops = { |
23b4ff3a | 591 | .func = ftrace_traceoff, |
dd42cd3e | 592 | .print = ftrace_traceoff_print, |
23b4ff3a SR |
593 | }; |
594 | ||
dd42cd3e SRRH |
595 | static struct ftrace_probe_ops stacktrace_probe_ops = { |
596 | .func = ftrace_stacktrace, | |
597 | .print = ftrace_stacktrace_print, | |
598 | }; | |
e110e3d1 | 599 | |
23b4ff3a | 600 | static int |
04ec7bb6 SRV |
601 | ftrace_trace_probe_callback(struct trace_array *tr, |
602 | struct ftrace_probe_ops *ops, | |
dd42cd3e SRRH |
603 | struct ftrace_hash *hash, char *glob, |
604 | char *cmd, char *param, int enable) | |
23b4ff3a | 605 | { |
23b4ff3a SR |
606 | void *count = (void *)-1; |
607 | char *number; | |
608 | int ret; | |
609 | ||
610 | /* hash funcs only work with set_ftrace_filter */ | |
611 | if (!enable) | |
612 | return -EINVAL; | |
613 | ||
d3d532d7 | 614 | if (glob[0] == '!') |
7b60f3d8 | 615 | return unregister_ftrace_function_probe_func(glob+1, tr, ops); |
8b8fa62c | 616 | |
23b4ff3a SR |
617 | if (!param) |
618 | goto out_reg; | |
619 | ||
620 | number = strsep(¶m, ":"); | |
621 | ||
622 | if (!strlen(number)) | |
623 | goto out_reg; | |
624 | ||
625 | /* | |
626 | * We use the callback data field (which is a pointer) | |
627 | * as our counter. | |
628 | */ | |
bcd83ea6 | 629 | ret = kstrtoul(number, 0, (unsigned long *)&count); |
23b4ff3a SR |
630 | if (ret) |
631 | return ret; | |
632 | ||
633 | out_reg: | |
04ec7bb6 | 634 | ret = register_ftrace_function_probe(glob, tr, ops, count); |
23b4ff3a | 635 | |
04aef32d | 636 | return ret < 0 ? ret : 0; |
23b4ff3a SR |
637 | } |
638 | ||
dd42cd3e | 639 | static int |
04ec7bb6 | 640 | ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash, |
dd42cd3e SRRH |
641 | char *glob, char *cmd, char *param, int enable) |
642 | { | |
643 | struct ftrace_probe_ops *ops; | |
644 | ||
645 | /* we register both traceon and traceoff to this callback */ | |
646 | if (strcmp(cmd, "traceon") == 0) | |
647 | ops = param ? &traceon_count_probe_ops : &traceon_probe_ops; | |
648 | else | |
649 | ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops; | |
650 | ||
04ec7bb6 | 651 | return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, |
dd42cd3e SRRH |
652 | param, enable); |
653 | } | |
654 | ||
655 | static int | |
04ec7bb6 | 656 | ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash, |
dd42cd3e SRRH |
657 | char *glob, char *cmd, char *param, int enable) |
658 | { | |
659 | struct ftrace_probe_ops *ops; | |
660 | ||
661 | ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops; | |
662 | ||
04ec7bb6 | 663 | return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, |
dd42cd3e SRRH |
664 | param, enable); |
665 | } | |
666 | ||
ad71d889 | 667 | static int |
04ec7bb6 | 668 | ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash, |
ad71d889 SRRH |
669 | char *glob, char *cmd, char *param, int enable) |
670 | { | |
671 | struct ftrace_probe_ops *ops; | |
672 | ||
673 | ops = &dump_probe_ops; | |
674 | ||
675 | /* Only dump once. */ | |
04ec7bb6 | 676 | return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, |
ad71d889 SRRH |
677 | "1", enable); |
678 | } | |
679 | ||
90e3c03c | 680 | static int |
04ec7bb6 | 681 | ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash, |
90e3c03c SRRH |
682 | char *glob, char *cmd, char *param, int enable) |
683 | { | |
684 | struct ftrace_probe_ops *ops; | |
685 | ||
686 | ops = &cpudump_probe_ops; | |
687 | ||
688 | /* Only dump once. */ | |
04ec7bb6 | 689 | return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, |
90e3c03c SRRH |
690 | "1", enable); |
691 | } | |
692 | ||
23b4ff3a SR |
693 | static struct ftrace_func_command ftrace_traceon_cmd = { |
694 | .name = "traceon", | |
695 | .func = ftrace_trace_onoff_callback, | |
696 | }; | |
697 | ||
698 | static struct ftrace_func_command ftrace_traceoff_cmd = { | |
699 | .name = "traceoff", | |
700 | .func = ftrace_trace_onoff_callback, | |
701 | }; | |
702 | ||
dd42cd3e SRRH |
703 | static struct ftrace_func_command ftrace_stacktrace_cmd = { |
704 | .name = "stacktrace", | |
705 | .func = ftrace_stacktrace_callback, | |
706 | }; | |
707 | ||
ad71d889 SRRH |
708 | static struct ftrace_func_command ftrace_dump_cmd = { |
709 | .name = "dump", | |
710 | .func = ftrace_dump_callback, | |
711 | }; | |
712 | ||
90e3c03c SRRH |
713 | static struct ftrace_func_command ftrace_cpudump_cmd = { |
714 | .name = "cpudump", | |
715 | .func = ftrace_cpudump_callback, | |
716 | }; | |
717 | ||
23b4ff3a SR |
718 | static int __init init_func_cmd_traceon(void) |
719 | { | |
720 | int ret; | |
721 | ||
722 | ret = register_ftrace_command(&ftrace_traceoff_cmd); | |
723 | if (ret) | |
724 | return ret; | |
725 | ||
726 | ret = register_ftrace_command(&ftrace_traceon_cmd); | |
727 | if (ret) | |
ad71d889 | 728 | goto out_free_traceoff; |
dd42cd3e SRRH |
729 | |
730 | ret = register_ftrace_command(&ftrace_stacktrace_cmd); | |
ad71d889 SRRH |
731 | if (ret) |
732 | goto out_free_traceon; | |
733 | ||
734 | ret = register_ftrace_command(&ftrace_dump_cmd); | |
735 | if (ret) | |
736 | goto out_free_stacktrace; | |
737 | ||
90e3c03c SRRH |
738 | ret = register_ftrace_command(&ftrace_cpudump_cmd); |
739 | if (ret) | |
740 | goto out_free_dump; | |
741 | ||
ad71d889 SRRH |
742 | return 0; |
743 | ||
90e3c03c SRRH |
744 | out_free_dump: |
745 | unregister_ftrace_command(&ftrace_dump_cmd); | |
ad71d889 SRRH |
746 | out_free_stacktrace: |
747 | unregister_ftrace_command(&ftrace_stacktrace_cmd); | |
748 | out_free_traceon: | |
749 | unregister_ftrace_command(&ftrace_traceon_cmd); | |
750 | out_free_traceoff: | |
751 | unregister_ftrace_command(&ftrace_traceoff_cmd); | |
752 | ||
23b4ff3a SR |
753 | return ret; |
754 | } | |
755 | #else | |
756 | static inline int init_func_cmd_traceon(void) | |
757 | { | |
758 | return 0; | |
759 | } | |
760 | #endif /* CONFIG_DYNAMIC_FTRACE */ | |
761 | ||
dbeafd0d | 762 | __init int init_function_trace(void) |
1b29b018 | 763 | { |
23b4ff3a | 764 | init_func_cmd_traceon(); |
1b29b018 SR |
765 | return register_tracer(&function_trace); |
766 | } |