/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

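/*
 * Illustrative sketch (not taken from this file): arch entry code is
 * expected to consult ftrace_graph_is_dead() before doing any graph
 * tracing work, along the lines of:
 *
 *	if (unlikely(ftrace_graph_is_dead()))
 *		return;
 */
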
/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq and others to fill in space in
 * the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the current task's ftrace
	 * return stack. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering
	 * out specific functions, the index is made negative by
	 * subtracting a huge value (FTRACE_NOTRACE_DEPTH); when ftrace
	 * sees a negative index, it ignores the record. The index is
	 * recovered when returning from the filtered function by adding
	 * FTRACE_NOTRACE_DEPTH back, after which functions are recorded
	 * normally again.
	 *
	 * The curr_ret_stack is initialized to -1 and gets incremented
	 * in this function. So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	*depth = current->curr_ret_stack;

	return 0;
}

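/*
 * Worked example of the notrace trick above (illustrative numbers):
 * with curr_ret_stack == 2, entering a function listed in
 * set_graph_notrace stores its real return state at ret_stack[3] and
 * leaves curr_ret_stack at 3 - FTRACE_NOTRACE_DEPTH, a large negative
 * value.  Entries hit while it runs see a negative depth and are
 * ignored.  On return, ftrace_pop_return_trace() adds
 * FTRACE_NOTRACE_DEPTH back to find index 3 again, and
 * ftrace_return_to_handler() restores curr_ret_stack to 2.
 */
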
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover index to get an original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

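/*
 * Illustrative control flow (arch specific, sketch only): the arch
 * entry code patches the traced function's return address to point at
 * its return trampoline, which calls back in here:
 *
 *	traced function returns
 *	  -> return_to_handler (arch assembly)
 *	    -> ftrace_return_to_handler(frame_pointer)
 *	  -> jump to the original return address handed back
 */
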
int __trace_graph_entry(struct trace_array *tr,
			struct ftrace_graph_ent *trace,
			unsigned long flags,
			int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	/* Trace it only when it is nested in, or is, an enabled function */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_threshold is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

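/*
 * Usage sketch: the max_depth test above is driven by the
 * max_graph_depth tracefs file created in init_graph_tracefs() below,
 * e.g. (mount point may vary by setup):
 *
 *	# echo 2 > /sys/kernel/debug/tracing/max_graph_depth
 *	# echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *
 * limits the graph to two levels of nesting; 0 removes the limit.
 */
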
static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

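/*
 * Note (assumption based on its callers elsewhere in the tree):
 * trace_graph_function() is the entry point used by the latency
 * tracers (irqsoff, wakeup) when their display-graph option is set;
 * it emits a matched entry/return pair for a single function with
 * calltime == rettime, i.e. zero duration.
 */
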
void __trace_graph_return(struct trace_array *tr,
			struct ftrace_graph_ret *trace,
			unsigned long flags,
			int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}

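/*
 * Worked example of the centering above (illustrative): for comm
 * "bash" and pid 1234, len = 4 + 4 + 1 = 9, so spaces = 14 - 9 = 5,
 * printed as two leading and three trailing spaces around "bash-1234".
 */
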
static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
	/*
	 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

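/*
 * Illustrative effect: when an entry event is immediately followed by
 * its matching return event (same pid and func), the pair is collapsed
 * into a single leaf line by print_graph_entry_leaf(), e.g.
 *
 *	1)   0.633 us    |  kfree();
 *
 * instead of separate "kfree() {" and "}" lines.
 */
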
static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

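/*
 * Worked example (illustrative): duration = 12345 ns.  do_div() leaves
 * duration = 12 and nsecs_rem = 345, so the column reads "12.345 us"
 * padded out to the fixed 8-character width.
 */
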
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

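/*
 * Illustrative effect of the two checks above: with the funcgraph-irqs
 * option cleared, everything from the first function entered inside
 * __irqentry_text down to the matching return depth is suppressed, so
 * an interrupt arriving mid-trace leaves no nested irq noise in the
 * graph output.
 */
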
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

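/*
 * Output note: with funcgraph-tail set (or whenever the entry event
 * was lost), the function name is appended to the closing brace as a
 * C-style comment, so readers need not scroll back to match braces in
 * deep call chains.
 */
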
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

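/*
 * Sample header produced above with the default flags (illustrative;
 * exact column widths depend on the options selected):
 *
 *	# CPU  DURATION                  FUNCTION CALLS
 *	# |     |   |                     |   |   |   |
 */
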
static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

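/*
 * Usage sketch: these flags surface as tracer options in tracefs,
 * e.g. (mount point may vary by setup):
 *
 *	# echo 0 > /sys/kernel/debug/tracing/options/funcgraph-irqs
 *
 * which clears TRACE_GRAPH_PRINT_IRQS and therefore sets
 * ftrace_graph_skip_irqs here.
 */
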
static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);