]>
Commit | Line | Data |
---|---|---|
fb52607a FW |
1 | /* |
2 | * | |
3 | * Function graph tracer. | |
9005f3eb | 4 | * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com> |
fb52607a FW |
5 | * Mostly borrowed from function tracer which |
6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> | |
7 | * | |
8 | */ | |
9 | #include <linux/debugfs.h> | |
10 | #include <linux/uaccess.h> | |
11 | #include <linux/ftrace.h> | |
12 | #include <linux/fs.h> | |
13 | ||
14 | #include "trace.h" | |
f0868d1e | 15 | #include "trace_output.h" |
fb52607a | 16 | |
/*
 * Per-cpu tracer state: the last pid seen on a cpu (used to emit a
 * context-switch banner when it changes) and the current call depth
 * (used to indent trailing comment entries).
 */
struct fgraph_data {
	pid_t		last_pid;
	int		depth;
};

/* Number of spaces of indentation per nesting level in the output. */
#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0X20

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or absolute time by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION,
	.opts = trace_opts
};

/* Trace array the graph callbacks record into; set by graph_trace_init(). */
static struct trace_array *graph_array;
712406a6 SR |
/*
 * Add a function return address to the trace stack on thread info.
 *
 * Records @ret (the real return address), @func, the call timestamp and
 * the caller's @frame_pointer into current->ret_stack so that
 * ftrace_return_to_handler() can later restore the real return path.
 * On success, *@depth is set to the new stack index.
 *
 * Returns 0 on success, -EBUSY if the task has no ret_stack or the
 * stack is full (the overrun is counted in current->trace_overrun).
 */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	/*
	 * Reserve the slot first, then fill it: the barrier() keeps the
	 * compiler from reordering the stores above the index update.
	 */
	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = index;

	return 0;
}
94 | ||
/*
 * Retrieve a function return address to the trace stack on thread info.
 *
 * Pops the top entry of current->ret_stack into @trace and stores the
 * original return address in *@ret.  If the stack is corrupt (negative
 * index, or a frame-pointer mismatch when the arch supports the FP
 * test), tracing is stopped and *@ret is pointed at panic() so the
 * failure is loud rather than a silent wild jump.
 */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have no where to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %pF return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}
143 | ||
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 *
 * Called by the arch return trampoline.  Pops the traced frame,
 * timestamps the return, hands the record to ftrace_graph_return()
 * and only then (after the barrier) releases the ret_stack slot, so
 * the handler never sees a recycled entry.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	/* Don't let the slot be reused before the handler is done with it. */
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
168 | ||
/*
 * Write one function-entry event into @tr's ring buffer.
 *
 * Returns 1 if the event was reserved (and committed unless discarded
 * by the event filter), 0 if tracing is disabled on this cpu or the
 * buffer reservation failed.  Caller must hold interrupts off.
 */
static int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	/* Commit only if the event filter did not discard the entry. */
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);

	return 1;
}
193 | ||
/*
 * Function-graph entry callback registered with the ftrace core.
 *
 * Filters out untraced tasks/functions, then records the entry with
 * interrupts off and the per-cpu ->disabled counter raised to prevent
 * recursion (only the first level, disabled == 1, actually records).
 * Returns the value of __trace_graph_entry(), or 0 when filtered.
 */
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (unlikely(!tr))
		return 0;

	if (!ftrace_trace_task(current))
		return 0;

	if (!ftrace_graph_addr(trace->func))
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}
	/* Only do the atomic if it is not already set */
	if (!test_tsk_trace_graph(current))
		set_tsk_trace_graph(current);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
232 | ||
/*
 * Write one function-return event into @tr's ring buffer.
 *
 * Mirror of __trace_graph_entry() for the TRACE_GRAPH_RET event type.
 * Silently returns if tracing is disabled on this cpu or the buffer
 * reservation fails.  Caller must hold interrupts off.
 */
static void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	/* Commit only if the event filter did not discard the entry. */
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);
}
255 | ||
/*
 * Function-graph return callback registered with the ftrace core.
 *
 * Records the return event with interrupts off and the per-cpu
 * ->disabled recursion guard raised.  When the task unwinds back to
 * depth 0, its trace-graph flag is cleared.
 */
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	/* Back at the outermost frame: stop tracing this task's graph. */
	if (!trace->depth)
		clear_tsk_trace_graph(current);
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
278 | ||
/*
 * Tracer ->init callback: publish @tr as the active graph trace array,
 * register the entry/return callbacks and start cmdline recording.
 * Returns 0 on success or the error from register_ftrace_graph().
 */
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	graph_array = tr;
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}
292 | ||
/* Let an external user (e.g. irqsoff tracer) set the graph trace array. */
void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;
}
297 | ||
/* Tracer ->reset callback: undo graph_trace_init() in reverse order. */
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
303 | ||
/*
 * Width of the widest possible cpu number, used to right-align the cpu
 * column.  NOTE(review): assumed to be initialized elsewhere (not
 * visible in this chunk) — confirm before relying on it being nonzero.
 */
static int max_bytes_for_cpu;

/* Print the " %*d) " cpu column into @s. */
static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
322 | ||
/* Fixed width of the "comm-pid" column, padded on both sides. */
#define TRACE_GRAPH_PROCINFO_LENGTH	14

/*
 * Print "comm-pid" centered in a TRACE_GRAPH_PROCINFO_LENGTH column.
 * The comm is truncated to 7 characters so comm + '-' + pid digits fit.
 */
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	/* Hard-truncate the comm so the column cannot overflow. */
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
365 | ||
1a056155 | 366 | |
/*
 * Print the latency-format flag column: irq-off state, need-resched,
 * hard/soft-irq context, lock depth and preempt count.  Returns nonzero
 * on success, 0 when the seq buffer is full.
 */
static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	int hardirq, softirq;
	int ret;

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	if (!trace_seq_printf(s, " %c%c%c",
			      (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
				(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
				  'X' : '.',
			      (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
				'N' : '.',
			      (hardirq && softirq) ? 'H' :
				hardirq ? 'h' : softirq ? 's' : '.'))
		return 0;

	/* '.' means no lock is held (negative depth). */
	if (entry->lock_depth < 0)
		ret = trace_seq_putc(s, '.');
	else
		ret = trace_seq_printf(s, "%d", entry->lock_depth);
	if (!ret)
		return 0;

	if (entry->preempt_count)
		return trace_seq_printf(s, "%x", entry->preempt_count);
	return trace_seq_puts(s, ".");
}
397 | ||
/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	/* No per-cpu state allocated: nothing to compare against. */
	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	/* -1 is the initial value: first event on this cpu, no banner. */
	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
455 | ||
/*
 * Peek at the next event: if it is the matching TRACE_GRAPH_RET for
 * @curr (same pid, same function), @curr is a leaf call.  Only then is
 * the iterator advanced past the return event; otherwise nothing is
 * consumed (beyond the peek protocol below) and NULL is returned.
 */
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct ring_buffer_iter *ring_iter;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	ring_iter = iter->buffer_iter[iter->cpu];

	/* First peek to compare current entry and the next one */
	if (ring_iter)
		event = ring_buffer_iter_peek(ring_iter, NULL);
	else {
		/* We need to consume the current entry to see the next one */
		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
					NULL);
	}

	if (!event)
		return NULL;

	next = ring_buffer_event_data(event);

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
494 | ||
/* Signal a overhead of time execution to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* If duration disappear, we don't need anything */
	if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
		return 1;

	/*
	 * Non nested entry or return.  Note: -1 converts to ULLONG_MAX
	 * here, so it is a sentinel no real duration can collide with.
	 */
	if (duration == -1)
		return trace_seq_printf(s, "  ");

	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 msecs */
		if (duration > 100000ULL)
			return trace_seq_printf(s, "! ");

		/* Duration exceeded 10 msecs */
		if (duration > 10000ULL)
			return trace_seq_printf(s, "+ ");
	}

	return trace_seq_printf(s, "  ");
}
519 | ||
/* Print timestamp @t (nanoseconds) as "seconds.microseconds | ". */
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	/* do_div() leaves seconds in t and returns the nsec remainder. */
	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}
530 | ||
/*
 * If @addr lies in the irq entry text section, print an interrupt
 * entry ("==========>") or exit ("<==========") marker line with the
 * same optional columns (abs time, cpu, proc) as normal entries.
 * Returns TRACE_TYPE_UNHANDLED when @addr is not an irq entry point.
 */
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Don't close the duration column if haven't one */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		trace_seq_printf(s, " |");
	ret = trace_seq_printf(s, "\n");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
83a8df61 | 588 | |
/*
 * Print @duration (nanoseconds) as "<usecs>.<frac> us " padded to a
 * fixed 7-digit numeric width.  Exported form used by other tracers
 * as well as print_graph_duration() below.
 */
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	/* do_div() leaves microseconds in duration, nsec remainder here. */
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
629 | ||
/* Print the duration followed by the "|  " column separator. */
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	int ret;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
645 | ||
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		*depth = call->depth - 1;
	}

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Leaf: print as a closed call, "func();" on one line. */
	ret = trace_seq_printf(s, "%pf();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
700 | ||
/*
 * Case of a non-leaf function entry: print "func() {" with no duration
 * (it is unknown until the matching return event).
 */
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		*depth = call->depth;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%pf() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
747 | ||
/*
 * Print everything that precedes the per-event payload: the pid-change
 * banner, an optional irq marker, and the abs-time / cpu / proc /
 * latency columns as selected by tracer_flags.  Returns 0 on success,
 * TRACE_TYPE_PARTIAL_LINE when the seq buffer fills up.
 */
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}
802 | ||
/*
 * Print a function-entry event, dispatching to the leaf form ("f();")
 * when the very next event is the matching return, or the nested form
 * ("f() {") otherwise.
 */
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter)
{
	int cpu = iter->cpu;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		return print_graph_entry_leaf(iter, field, leaf_ret, s);
	else
		return print_graph_entry_nested(iter, field, s, cpu);

}
821 | ||
/*
 * Print a function-return event: overhead marker, duration, the
 * closing "}" at the right indentation, an optional overrun count,
 * and an irq-exit marker when the function was an irq entry point.
 */
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int ret;
	int i;

	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		*depth = trace->depth - 1;
	}

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "}\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overrun */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
885 | ||
1fd8f2a3 | 886 | static enum print_line_t |
5087f8d2 SR |
887 | print_graph_comment(struct trace_seq *s, struct trace_entry *ent, |
888 | struct trace_iterator *iter) | |
1fd8f2a3 | 889 | { |
5087f8d2 | 890 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
2fbcdb35 | 891 | struct fgraph_data *data = iter->private; |
5087f8d2 | 892 | struct trace_event *event; |
2fbcdb35 | 893 | int depth = 0; |
1fd8f2a3 | 894 | int ret; |
2fbcdb35 SR |
895 | int i; |
896 | ||
897 | if (data) | |
898 | depth = per_cpu_ptr(data, iter->cpu)->depth; | |
9005f3eb | 899 | |
ac5f6c96 | 900 | if (print_graph_prologue(iter, s, 0, 0)) |
d1f9cbd7 FW |
901 | return TRACE_TYPE_PARTIAL_LINE; |
902 | ||
1fd8f2a3 | 903 | /* No overhead */ |
9005f3eb FW |
904 | ret = print_graph_overhead(-1, s); |
905 | if (!ret) | |
906 | return TRACE_TYPE_PARTIAL_LINE; | |
907 | ||
908 | /* No time */ | |
909 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | |
910 | ret = trace_seq_printf(s, " | "); | |
1fd8f2a3 FW |
911 | if (!ret) |
912 | return TRACE_TYPE_PARTIAL_LINE; | |
913 | } | |
914 | ||
1fd8f2a3 | 915 | /* Indentation */ |
2fbcdb35 SR |
916 | if (depth > 0) |
917 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { | |
1fd8f2a3 FW |
918 | ret = trace_seq_printf(s, " "); |
919 | if (!ret) | |
920 | return TRACE_TYPE_PARTIAL_LINE; | |
921 | } | |
922 | ||
923 | /* The comment */ | |
769b0441 FW |
924 | ret = trace_seq_printf(s, "/* "); |
925 | if (!ret) | |
926 | return TRACE_TYPE_PARTIAL_LINE; | |
927 | ||
5087f8d2 SR |
928 | switch (iter->ent->type) { |
929 | case TRACE_BPRINT: | |
930 | ret = trace_print_bprintk_msg_only(iter); | |
931 | if (ret != TRACE_TYPE_HANDLED) | |
932 | return ret; | |
933 | break; | |
934 | case TRACE_PRINT: | |
935 | ret = trace_print_printk_msg_only(iter); | |
936 | if (ret != TRACE_TYPE_HANDLED) | |
937 | return ret; | |
938 | break; | |
939 | default: | |
940 | event = ftrace_find_event(ent->type); | |
941 | if (!event) | |
942 | return TRACE_TYPE_UNHANDLED; | |
943 | ||
944 | ret = event->trace(iter, sym_flags); | |
945 | if (ret != TRACE_TYPE_HANDLED) | |
946 | return ret; | |
947 | } | |
1fd8f2a3 | 948 | |
412d0bb5 FW |
949 | /* Strip ending newline */ |
950 | if (s->buffer[s->len - 1] == '\n') { | |
951 | s->buffer[s->len - 1] = '\0'; | |
952 | s->len--; | |
953 | } | |
954 | ||
1fd8f2a3 FW |
955 | ret = trace_seq_printf(s, " */\n"); |
956 | if (!ret) | |
957 | return TRACE_TYPE_PARTIAL_LINE; | |
958 | ||
959 | return TRACE_TYPE_HANDLED; | |
960 | } | |
961 | ||
962 | ||
287b6e68 FW |
963 | enum print_line_t |
964 | print_graph_function(struct trace_iterator *iter) | |
965 | { | |
287b6e68 | 966 | struct trace_entry *entry = iter->ent; |
5087f8d2 | 967 | struct trace_seq *s = &iter->seq; |
fb52607a | 968 | |
287b6e68 FW |
969 | switch (entry->type) { |
970 | case TRACE_GRAPH_ENT: { | |
38ceb592 LJ |
971 | /* |
972 | * print_graph_entry() may consume the current event, | |
973 | * thus @field may become invalid, so we need to save it. | |
974 | * sizeof(struct ftrace_graph_ent_entry) is very small, | |
975 | * it can be safely saved at the stack. | |
976 | */ | |
977 | struct ftrace_graph_ent_entry *field, saved; | |
287b6e68 | 978 | trace_assign_type(field, entry); |
38ceb592 LJ |
979 | saved = *field; |
980 | return print_graph_entry(&saved, s, iter); | |
287b6e68 FW |
981 | } |
982 | case TRACE_GRAPH_RET: { | |
983 | struct ftrace_graph_ret_entry *field; | |
984 | trace_assign_type(field, entry); | |
9005f3eb | 985 | return print_graph_return(&field->ret, s, entry, iter); |
287b6e68 FW |
986 | } |
987 | default: | |
5087f8d2 | 988 | return print_graph_comment(s, entry, iter); |
fb52607a | 989 | } |
5087f8d2 SR |
990 | |
991 | return TRACE_TYPE_HANDLED; | |
fb52607a FW |
992 | } |
993 | ||
/*
 * Print the latency-format legend explaining the "|||||" flag columns
 * (irqs-off, need-resched, hardirq/softirq, preempt-depth, lock-depth).
 * Only called when TRACE_ITER_LATENCY_FMT is set (see print_graph_headers).
 *
 * The legend is shifted right by the combined width of whichever optional
 * leading columns are enabled, so it lines up with the flag field.
 * NOTE(review): literal spacing below reconstructed from column widths
 * (16/4/17) — confirm against the original file.
 */
static void print_lat_header(struct seq_file *s)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	/* Widths must match the columns emitted by print_graph_headers(). */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| / _-=> lock-depth      \n", size, spaces);
	seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
}
1015 | ||
/*
 * Tracer ->print_header callback: emit the two-line column header, with
 * each optional column (absolute time, cpu, task/pid, latency flags,
 * duration) included only when its flag is set, matching the per-entry
 * output format.
 * NOTE(review): exact literal spacing reconstructed — the export collapsed
 * runs of spaces; confirm widths against the original file.
 */
static void print_graph_headers(struct seq_file *s)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s);

	/* 1st line */
	seq_printf(s, "#");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "|||||");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line: continuation bars under each enabled column */
	seq_printf(s, "#");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "|||||");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}
9005f3eb FW |
1051 | |
1052 | static void graph_trace_open(struct trace_iterator *iter) | |
1053 | { | |
2fbcdb35 SR |
1054 | /* pid and depth on the last trace processed */ |
1055 | struct fgraph_data *data = alloc_percpu(struct fgraph_data); | |
9005f3eb FW |
1056 | int cpu; |
1057 | ||
2fbcdb35 | 1058 | if (!data) |
9005f3eb FW |
1059 | pr_warning("function graph tracer: not enough memory\n"); |
1060 | else | |
1061 | for_each_possible_cpu(cpu) { | |
2fbcdb35 SR |
1062 | pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid); |
1063 | int *depth = &(per_cpu_ptr(data, cpu)->depth); | |
9005f3eb | 1064 | *pid = -1; |
2fbcdb35 | 1065 | *depth = 0; |
9005f3eb FW |
1066 | } |
1067 | ||
2fbcdb35 | 1068 | iter->private = data; |
9005f3eb FW |
1069 | } |
1070 | ||
/*
 * Tracer ->close callback: release the per-cpu fgraph_data allocated in
 * graph_trace_open() (free_percpu(NULL) is a no-op, so the allocation-
 * failure path is safe).
 */
static void graph_trace_close(struct trace_iterator *iter)
{
	free_percpu(iter->private);
}
1075 | ||
/* Tracer descriptor registered with the ftrace core as "function_graph". */
static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.open		= graph_trace_open,	/* alloc per-cpu pid/depth state */
	.close		= graph_trace_close,	/* free it again */
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,	/* per-entry formatter */
	.print_header	= print_graph_headers,	/* column header */
	.flags		= &tracer_flags,	/* funcgraph-* options */
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};
1090 | ||
static __init int init_graph_trace(void)
{
	/*
	 * snprintf(NULL, 0, ...) returns the formatted length without
	 * writing anything: this caches the width (in characters) of the
	 * largest possible cpu id, used to pad the CPU column.
	 */
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);