/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

be1eca39 | 17 | struct fgraph_cpu_data { |
2fbcdb35 SR |
18 | pid_t last_pid; |
19 | int depth; | |
be1eca39 JO |
20 | int ignore; |
21 | }; | |
22 | ||
23 | struct fgraph_data { | |
24 | struct fgraph_cpu_data *cpu_data; | |
25 | ||
26 | /* Place to preserve last processed entry. */ | |
27 | struct ftrace_graph_ent_entry ent; | |
28 | struct ftrace_graph_ret_entry ret; | |
29 | int failed; | |
30 | int cpu; | |
2fbcdb35 SR |
31 | }; |
32 | ||
287b6e68 | 33 | #define TRACE_GRAPH_INDENT 2 |
fb52607a | 34 | |
1a056155 | 35 | /* Flag options */ |
fb52607a | 36 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 |
1a056155 FW |
37 | #define TRACE_GRAPH_PRINT_CPU 0x2 |
38 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 | |
11e84acc | 39 | #define TRACE_GRAPH_PRINT_PROC 0x8 |
9005f3eb FW |
40 | #define TRACE_GRAPH_PRINT_DURATION 0x10 |
41 | #define TRACE_GRAPH_PRINT_ABS_TIME 0X20 | |
1a056155 | 42 | |
fb52607a | 43 | static struct tracer_opt trace_opts[] = { |
9005f3eb | 44 | /* Display overruns? (for self-debug purpose) */ |
1a056155 FW |
45 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, |
46 | /* Display CPU ? */ | |
47 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, | |
48 | /* Display Overhead ? */ | |
49 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, | |
11e84acc FW |
50 | /* Display proc name/pid */ |
51 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, | |
9005f3eb FW |
52 | /* Display duration of execution */ |
53 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, | |
54 | /* Display absolute time of an entry */ | |
55 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, | |
fb52607a FW |
56 | { } /* Empty entry */ |
57 | }; | |
58 | ||
59 | static struct tracer_flags tracer_flags = { | |
11e84acc | 60 | /* Don't display overruns and proc by default */ |
9005f3eb FW |
61 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | |
62 | TRACE_GRAPH_PRINT_DURATION, | |
fb52607a FW |
63 | .opts = trace_opts |
64 | }; | |
65 | ||
1a0799a8 | 66 | static struct trace_array *graph_array; |
9005f3eb | 67 | |
fb52607a | 68 | |
712406a6 SR |
69 | /* Add a function return address to the trace stack on thread info.*/ |
70 | int | |
71e308a2 SR |
71 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, |
72 | unsigned long frame_pointer) | |
712406a6 | 73 | { |
5d1a03dc | 74 | unsigned long long calltime; |
712406a6 SR |
75 | int index; |
76 | ||
77 | if (!current->ret_stack) | |
78 | return -EBUSY; | |
79 | ||
82310a32 SR |
80 | /* |
81 | * We must make sure the ret_stack is tested before we read | |
82 | * anything else. | |
83 | */ | |
84 | smp_rmb(); | |
85 | ||
712406a6 SR |
86 | /* The return trace stack is full */ |
87 | if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { | |
88 | atomic_inc(¤t->trace_overrun); | |
89 | return -EBUSY; | |
90 | } | |
91 | ||
5d1a03dc SR |
92 | calltime = trace_clock_local(); |
93 | ||
712406a6 SR |
94 | index = ++current->curr_ret_stack; |
95 | barrier(); | |
96 | current->ret_stack[index].ret = ret; | |
97 | current->ret_stack[index].func = func; | |
5d1a03dc | 98 | current->ret_stack[index].calltime = calltime; |
a2a16d6a | 99 | current->ret_stack[index].subtime = 0; |
71e308a2 | 100 | current->ret_stack[index].fp = frame_pointer; |
712406a6 SR |
101 | *depth = index; |
102 | ||
103 | return 0; | |
104 | } | |
105 | ||
106 | /* Retrieve a function return address to the trace stack on thread info.*/ | |
a2a16d6a | 107 | static void |
71e308a2 SR |
108 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret, |
109 | unsigned long frame_pointer) | |
712406a6 SR |
110 | { |
111 | int index; | |
112 | ||
113 | index = current->curr_ret_stack; | |
114 | ||
115 | if (unlikely(index < 0)) { | |
116 | ftrace_graph_stop(); | |
117 | WARN_ON(1); | |
118 | /* Might as well panic, otherwise we have no where to go */ | |
119 | *ret = (unsigned long)panic; | |
120 | return; | |
121 | } | |
122 | ||
71e308a2 SR |
123 | #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST |
124 | /* | |
125 | * The arch may choose to record the frame pointer used | |
126 | * and check it here to make sure that it is what we expect it | |
127 | * to be. If gcc does not set the place holder of the return | |
128 | * address in the frame pointer, and does a copy instead, then | |
129 | * the function graph trace will fail. This test detects this | |
130 | * case. | |
131 | * | |
132 | * Currently, x86_32 with optimize for size (-Os) makes the latest | |
133 | * gcc do the above. | |
134 | */ | |
135 | if (unlikely(current->ret_stack[index].fp != frame_pointer)) { | |
136 | ftrace_graph_stop(); | |
137 | WARN(1, "Bad frame pointer: expected %lx, received %lx\n" | |
b375a11a | 138 | " from func %ps return to %lx\n", |
71e308a2 SR |
139 | current->ret_stack[index].fp, |
140 | frame_pointer, | |
141 | (void *)current->ret_stack[index].func, | |
142 | current->ret_stack[index].ret); | |
143 | *ret = (unsigned long)panic; | |
144 | return; | |
145 | } | |
146 | #endif | |
147 | ||
712406a6 SR |
148 | *ret = current->ret_stack[index].ret; |
149 | trace->func = current->ret_stack[index].func; | |
150 | trace->calltime = current->ret_stack[index].calltime; | |
151 | trace->overrun = atomic_read(¤t->trace_overrun); | |
152 | trace->depth = index; | |
712406a6 SR |
153 | } |
154 | ||
155 | /* | |
156 | * Send the trace to the ring-buffer. | |
157 | * @return the original return address. | |
158 | */ | |
71e308a2 | 159 | unsigned long ftrace_return_to_handler(unsigned long frame_pointer) |
712406a6 SR |
160 | { |
161 | struct ftrace_graph_ret trace; | |
162 | unsigned long ret; | |
163 | ||
71e308a2 | 164 | ftrace_pop_return_trace(&trace, &ret, frame_pointer); |
0012693a | 165 | trace.rettime = trace_clock_local(); |
712406a6 | 166 | ftrace_graph_return(&trace); |
a2a16d6a SR |
167 | barrier(); |
168 | current->curr_ret_stack--; | |
712406a6 SR |
169 | |
170 | if (unlikely(!ret)) { | |
171 | ftrace_graph_stop(); | |
172 | WARN_ON(1); | |
173 | /* Might as well panic. What else to do? */ | |
174 | ret = (unsigned long)panic; | |
175 | } | |
176 | ||
177 | return ret; | |
178 | } | |
179 | ||
1a0799a8 FW |
180 | static int __trace_graph_entry(struct trace_array *tr, |
181 | struct ftrace_graph_ent *trace, | |
182 | unsigned long flags, | |
183 | int pc) | |
184 | { | |
185 | struct ftrace_event_call *call = &event_funcgraph_entry; | |
186 | struct ring_buffer_event *event; | |
e77405ad | 187 | struct ring_buffer *buffer = tr->buffer; |
1a0799a8 FW |
188 | struct ftrace_graph_ent_entry *entry; |
189 | ||
190 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | |
191 | return 0; | |
192 | ||
e77405ad | 193 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, |
1a0799a8 FW |
194 | sizeof(*entry), flags, pc); |
195 | if (!event) | |
196 | return 0; | |
197 | entry = ring_buffer_event_data(event); | |
198 | entry->graph_ent = *trace; | |
e77405ad SR |
199 | if (!filter_current_check_discard(buffer, call, entry, event)) |
200 | ring_buffer_unlock_commit(buffer, event); | |
1a0799a8 FW |
201 | |
202 | return 1; | |
203 | } | |
204 | ||
205 | int trace_graph_entry(struct ftrace_graph_ent *trace) | |
206 | { | |
207 | struct trace_array *tr = graph_array; | |
208 | struct trace_array_cpu *data; | |
209 | unsigned long flags; | |
210 | long disabled; | |
211 | int ret; | |
212 | int cpu; | |
213 | int pc; | |
214 | ||
215 | if (unlikely(!tr)) | |
216 | return 0; | |
217 | ||
218 | if (!ftrace_trace_task(current)) | |
219 | return 0; | |
220 | ||
221 | if (!ftrace_graph_addr(trace->func)) | |
222 | return 0; | |
223 | ||
224 | local_irq_save(flags); | |
225 | cpu = raw_smp_processor_id(); | |
226 | data = tr->data[cpu]; | |
227 | disabled = atomic_inc_return(&data->disabled); | |
228 | if (likely(disabled == 1)) { | |
229 | pc = preempt_count(); | |
230 | ret = __trace_graph_entry(tr, trace, flags, pc); | |
231 | } else { | |
232 | ret = 0; | |
233 | } | |
234 | /* Only do the atomic if it is not already set */ | |
235 | if (!test_tsk_trace_graph(current)) | |
236 | set_tsk_trace_graph(current); | |
237 | ||
238 | atomic_dec(&data->disabled); | |
239 | local_irq_restore(flags); | |
240 | ||
241 | return ret; | |
242 | } | |
243 | ||
244 | static void __trace_graph_return(struct trace_array *tr, | |
245 | struct ftrace_graph_ret *trace, | |
246 | unsigned long flags, | |
247 | int pc) | |
248 | { | |
249 | struct ftrace_event_call *call = &event_funcgraph_exit; | |
250 | struct ring_buffer_event *event; | |
e77405ad | 251 | struct ring_buffer *buffer = tr->buffer; |
1a0799a8 FW |
252 | struct ftrace_graph_ret_entry *entry; |
253 | ||
254 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | |
255 | return; | |
256 | ||
e77405ad | 257 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, |
1a0799a8 FW |
258 | sizeof(*entry), flags, pc); |
259 | if (!event) | |
260 | return; | |
261 | entry = ring_buffer_event_data(event); | |
262 | entry->ret = *trace; | |
e77405ad SR |
263 | if (!filter_current_check_discard(buffer, call, entry, event)) |
264 | ring_buffer_unlock_commit(buffer, event); | |
1a0799a8 FW |
265 | } |
266 | ||
267 | void trace_graph_return(struct ftrace_graph_ret *trace) | |
268 | { | |
269 | struct trace_array *tr = graph_array; | |
270 | struct trace_array_cpu *data; | |
271 | unsigned long flags; | |
272 | long disabled; | |
273 | int cpu; | |
274 | int pc; | |
275 | ||
276 | local_irq_save(flags); | |
277 | cpu = raw_smp_processor_id(); | |
278 | data = tr->data[cpu]; | |
279 | disabled = atomic_inc_return(&data->disabled); | |
280 | if (likely(disabled == 1)) { | |
281 | pc = preempt_count(); | |
282 | __trace_graph_return(tr, trace, flags, pc); | |
283 | } | |
284 | if (!trace->depth) | |
285 | clear_tsk_trace_graph(current); | |
286 | atomic_dec(&data->disabled); | |
287 | local_irq_restore(flags); | |
288 | } | |
289 | ||
fb52607a FW |
290 | static int graph_trace_init(struct trace_array *tr) |
291 | { | |
1a0799a8 FW |
292 | int ret; |
293 | ||
294 | graph_array = tr; | |
295 | ret = register_ftrace_graph(&trace_graph_return, | |
296 | &trace_graph_entry); | |
660c7f9b SR |
297 | if (ret) |
298 | return ret; | |
299 | tracing_start_cmdline_record(); | |
300 | ||
301 | return 0; | |
fb52607a FW |
302 | } |
303 | ||
1a0799a8 FW |
304 | void set_graph_array(struct trace_array *tr) |
305 | { | |
306 | graph_array = tr; | |
307 | } | |
308 | ||
fb52607a FW |
309 | static void graph_trace_reset(struct trace_array *tr) |
310 | { | |
660c7f9b SR |
311 | tracing_stop_cmdline_record(); |
312 | unregister_ftrace_graph(); | |
fb52607a FW |
313 | } |
314 | ||
0c9e6f63 | 315 | static int max_bytes_for_cpu; |
1a056155 FW |
316 | |
317 | static enum print_line_t | |
318 | print_graph_cpu(struct trace_seq *s, int cpu) | |
319 | { | |
1a056155 | 320 | int ret; |
1a056155 | 321 | |
d51090b3 IM |
322 | /* |
323 | * Start with a space character - to make it stand out | |
324 | * to the right a bit when trace output is pasted into | |
325 | * email: | |
326 | */ | |
0c9e6f63 | 327 | ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); |
1a056155 | 328 | if (!ret) |
d51090b3 IM |
329 | return TRACE_TYPE_PARTIAL_LINE; |
330 | ||
1a056155 FW |
331 | return TRACE_TYPE_HANDLED; |
332 | } | |
333 | ||
11e84acc FW |
334 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 |
335 | ||
336 | static enum print_line_t | |
337 | print_graph_proc(struct trace_seq *s, pid_t pid) | |
338 | { | |
4ca53085 | 339 | char comm[TASK_COMM_LEN]; |
11e84acc FW |
340 | /* sign + log10(MAX_INT) + '\0' */ |
341 | char pid_str[11]; | |
4ca53085 SR |
342 | int spaces = 0; |
343 | int ret; | |
344 | int len; | |
345 | int i; | |
11e84acc | 346 | |
4ca53085 | 347 | trace_find_cmdline(pid, comm); |
11e84acc FW |
348 | comm[7] = '\0'; |
349 | sprintf(pid_str, "%d", pid); | |
350 | ||
351 | /* 1 stands for the "-" character */ | |
352 | len = strlen(comm) + strlen(pid_str) + 1; | |
353 | ||
354 | if (len < TRACE_GRAPH_PROCINFO_LENGTH) | |
355 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; | |
356 | ||
357 | /* First spaces to align center */ | |
358 | for (i = 0; i < spaces / 2; i++) { | |
359 | ret = trace_seq_printf(s, " "); | |
360 | if (!ret) | |
361 | return TRACE_TYPE_PARTIAL_LINE; | |
362 | } | |
363 | ||
364 | ret = trace_seq_printf(s, "%s-%s", comm, pid_str); | |
365 | if (!ret) | |
366 | return TRACE_TYPE_PARTIAL_LINE; | |
367 | ||
368 | /* Last spaces to align center */ | |
369 | for (i = 0; i < spaces - (spaces / 2); i++) { | |
370 | ret = trace_seq_printf(s, " "); | |
371 | if (!ret) | |
372 | return TRACE_TYPE_PARTIAL_LINE; | |
373 | } | |
374 | return TRACE_TYPE_HANDLED; | |
375 | } | |
376 | ||
1a056155 | 377 | |
49ff5903 SR |
378 | static enum print_line_t |
379 | print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | |
380 | { | |
f81c972d | 381 | if (!trace_seq_putc(s, ' ')) |
637e7e86 SR |
382 | return 0; |
383 | ||
f81c972d | 384 | return trace_print_lat_fmt(s, entry); |
49ff5903 SR |
385 | } |
386 | ||
287b6e68 | 387 | /* If the pid changed since the last trace, output this event */ |
11e84acc | 388 | static enum print_line_t |
2fbcdb35 | 389 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) |
287b6e68 | 390 | { |
d51090b3 | 391 | pid_t prev_pid; |
9005f3eb | 392 | pid_t *last_pid; |
d51090b3 | 393 | int ret; |
660c7f9b | 394 | |
2fbcdb35 | 395 | if (!data) |
9005f3eb FW |
396 | return TRACE_TYPE_HANDLED; |
397 | ||
be1eca39 | 398 | last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); |
9005f3eb FW |
399 | |
400 | if (*last_pid == pid) | |
11e84acc | 401 | return TRACE_TYPE_HANDLED; |
fb52607a | 402 | |
9005f3eb FW |
403 | prev_pid = *last_pid; |
404 | *last_pid = pid; | |
d51090b3 | 405 | |
9005f3eb FW |
406 | if (prev_pid == -1) |
407 | return TRACE_TYPE_HANDLED; | |
d51090b3 IM |
408 | /* |
409 | * Context-switch trace line: | |
410 | ||
411 | ------------------------------------------ | |
412 | | 1) migration/0--1 => sshd-1755 | |
413 | ------------------------------------------ | |
414 | ||
415 | */ | |
416 | ret = trace_seq_printf(s, | |
1fd8f2a3 | 417 | " ------------------------------------------\n"); |
11e84acc | 418 | if (!ret) |
810dc732 | 419 | return TRACE_TYPE_PARTIAL_LINE; |
11e84acc FW |
420 | |
421 | ret = print_graph_cpu(s, cpu); | |
422 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
810dc732 | 423 | return TRACE_TYPE_PARTIAL_LINE; |
11e84acc FW |
424 | |
425 | ret = print_graph_proc(s, prev_pid); | |
426 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
810dc732 | 427 | return TRACE_TYPE_PARTIAL_LINE; |
11e84acc FW |
428 | |
429 | ret = trace_seq_printf(s, " => "); | |
430 | if (!ret) | |
810dc732 | 431 | return TRACE_TYPE_PARTIAL_LINE; |
11e84acc FW |
432 | |
433 | ret = print_graph_proc(s, pid); | |
434 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
810dc732 | 435 | return TRACE_TYPE_PARTIAL_LINE; |
11e84acc FW |
436 | |
437 | ret = trace_seq_printf(s, | |
438 | "\n ------------------------------------------\n\n"); | |
439 | if (!ret) | |
810dc732 | 440 | return TRACE_TYPE_PARTIAL_LINE; |
11e84acc | 441 | |
810dc732 | 442 | return TRACE_TYPE_HANDLED; |
287b6e68 FW |
443 | } |
444 | ||
b91facc3 FW |
445 | static struct ftrace_graph_ret_entry * |
446 | get_return_for_leaf(struct trace_iterator *iter, | |
83a8df61 FW |
447 | struct ftrace_graph_ent_entry *curr) |
448 | { | |
be1eca39 JO |
449 | struct fgraph_data *data = iter->private; |
450 | struct ring_buffer_iter *ring_iter = NULL; | |
83a8df61 FW |
451 | struct ring_buffer_event *event; |
452 | struct ftrace_graph_ret_entry *next; | |
453 | ||
be1eca39 JO |
454 | /* |
455 | * If the previous output failed to write to the seq buffer, | |
456 | * then we just reuse the data from before. | |
457 | */ | |
458 | if (data && data->failed) { | |
459 | curr = &data->ent; | |
460 | next = &data->ret; | |
461 | } else { | |
83a8df61 | 462 | |
be1eca39 JO |
463 | ring_iter = iter->buffer_iter[iter->cpu]; |
464 | ||
465 | /* First peek to compare current entry and the next one */ | |
466 | if (ring_iter) | |
467 | event = ring_buffer_iter_peek(ring_iter, NULL); | |
468 | else { | |
469 | /* | |
470 | * We need to consume the current entry to see | |
471 | * the next one. | |
472 | */ | |
473 | ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL); | |
474 | event = ring_buffer_peek(iter->tr->buffer, iter->cpu, | |
475 | NULL); | |
476 | } | |
83a8df61 | 477 | |
be1eca39 JO |
478 | if (!event) |
479 | return NULL; | |
480 | ||
481 | next = ring_buffer_event_data(event); | |
83a8df61 | 482 | |
be1eca39 JO |
483 | if (data) { |
484 | /* | |
485 | * Save current and next entries for later reference | |
486 | * if the output fails. | |
487 | */ | |
488 | data->ent = *curr; | |
489 | data->ret = *next; | |
490 | } | |
491 | } | |
83a8df61 FW |
492 | |
493 | if (next->ent.type != TRACE_GRAPH_RET) | |
b91facc3 | 494 | return NULL; |
83a8df61 FW |
495 | |
496 | if (curr->ent.pid != next->ent.pid || | |
497 | curr->graph_ent.func != next->ret.func) | |
b91facc3 | 498 | return NULL; |
83a8df61 | 499 | |
b91facc3 FW |
500 | /* this is a leaf, now advance the iterator */ |
501 | if (ring_iter) | |
502 | ring_buffer_read(ring_iter, NULL); | |
503 | ||
504 | return next; | |
83a8df61 FW |
505 | } |
506 | ||
9005f3eb FW |
507 | /* Signal a overhead of time execution to the output */ |
508 | static int | |
509 | print_graph_overhead(unsigned long long duration, struct trace_seq *s) | |
510 | { | |
511 | /* If duration disappear, we don't need anything */ | |
512 | if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)) | |
513 | return 1; | |
514 | ||
515 | /* Non nested entry or return */ | |
516 | if (duration == -1) | |
517 | return trace_seq_printf(s, " "); | |
518 | ||
519 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | |
520 | /* Duration exceeded 100 msecs */ | |
521 | if (duration > 100000ULL) | |
522 | return trace_seq_printf(s, "! "); | |
523 | ||
524 | /* Duration exceeded 10 msecs */ | |
525 | if (duration > 10000ULL) | |
526 | return trace_seq_printf(s, "+ "); | |
527 | } | |
528 | ||
529 | return trace_seq_printf(s, " "); | |
530 | } | |
531 | ||
d1f9cbd7 FW |
532 | static int print_graph_abs_time(u64 t, struct trace_seq *s) |
533 | { | |
534 | unsigned long usecs_rem; | |
535 | ||
536 | usecs_rem = do_div(t, NSEC_PER_SEC); | |
537 | usecs_rem /= 1000; | |
538 | ||
539 | return trace_seq_printf(s, "%5lu.%06lu | ", | |
540 | (unsigned long)t, usecs_rem); | |
541 | } | |
542 | ||
f8b755ac | 543 | static enum print_line_t |
d1f9cbd7 | 544 | print_graph_irq(struct trace_iterator *iter, unsigned long addr, |
9005f3eb | 545 | enum trace_type type, int cpu, pid_t pid) |
f8b755ac FW |
546 | { |
547 | int ret; | |
d1f9cbd7 | 548 | struct trace_seq *s = &iter->seq; |
f8b755ac FW |
549 | |
550 | if (addr < (unsigned long)__irqentry_text_start || | |
551 | addr >= (unsigned long)__irqentry_text_end) | |
552 | return TRACE_TYPE_UNHANDLED; | |
553 | ||
d1f9cbd7 FW |
554 | /* Absolute time */ |
555 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { | |
556 | ret = print_graph_abs_time(iter->ts, s); | |
557 | if (!ret) | |
558 | return TRACE_TYPE_PARTIAL_LINE; | |
559 | } | |
560 | ||
9005f3eb FW |
561 | /* Cpu */ |
562 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | |
563 | ret = print_graph_cpu(s, cpu); | |
564 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
565 | return TRACE_TYPE_PARTIAL_LINE; | |
566 | } | |
49ff5903 | 567 | |
9005f3eb FW |
568 | /* Proc */ |
569 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | |
570 | ret = print_graph_proc(s, pid); | |
571 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
572 | return TRACE_TYPE_PARTIAL_LINE; | |
573 | ret = trace_seq_printf(s, " | "); | |
574 | if (!ret) | |
575 | return TRACE_TYPE_PARTIAL_LINE; | |
576 | } | |
f8b755ac | 577 | |
9005f3eb FW |
578 | /* No overhead */ |
579 | ret = print_graph_overhead(-1, s); | |
580 | if (!ret) | |
581 | return TRACE_TYPE_PARTIAL_LINE; | |
f8b755ac | 582 | |
9005f3eb FW |
583 | if (type == TRACE_GRAPH_ENT) |
584 | ret = trace_seq_printf(s, "==========>"); | |
585 | else | |
586 | ret = trace_seq_printf(s, "<=========="); | |
587 | ||
588 | if (!ret) | |
589 | return TRACE_TYPE_PARTIAL_LINE; | |
590 | ||
591 | /* Don't close the duration column if haven't one */ | |
592 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) | |
593 | trace_seq_printf(s, " |"); | |
594 | ret = trace_seq_printf(s, "\n"); | |
f8b755ac | 595 | |
f8b755ac FW |
596 | if (!ret) |
597 | return TRACE_TYPE_PARTIAL_LINE; | |
598 | return TRACE_TYPE_HANDLED; | |
599 | } | |
83a8df61 | 600 | |
0706f1c4 SR |
601 | enum print_line_t |
602 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) | |
83a8df61 FW |
603 | { |
604 | unsigned long nsecs_rem = do_div(duration, 1000); | |
166d3c79 FW |
605 | /* log10(ULONG_MAX) + '\0' */ |
606 | char msecs_str[21]; | |
607 | char nsecs_str[5]; | |
608 | int ret, len; | |
609 | int i; | |
610 | ||
611 | sprintf(msecs_str, "%lu", (unsigned long) duration); | |
612 | ||
613 | /* Print msecs */ | |
9005f3eb | 614 | ret = trace_seq_printf(s, "%s", msecs_str); |
166d3c79 FW |
615 | if (!ret) |
616 | return TRACE_TYPE_PARTIAL_LINE; | |
617 | ||
618 | len = strlen(msecs_str); | |
619 | ||
620 | /* Print nsecs (we don't want to exceed 7 numbers) */ | |
621 | if (len < 7) { | |
622 | snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem); | |
623 | ret = trace_seq_printf(s, ".%s", nsecs_str); | |
624 | if (!ret) | |
625 | return TRACE_TYPE_PARTIAL_LINE; | |
626 | len += strlen(nsecs_str); | |
627 | } | |
628 | ||
629 | ret = trace_seq_printf(s, " us "); | |
630 | if (!ret) | |
631 | return TRACE_TYPE_PARTIAL_LINE; | |
632 | ||
633 | /* Print remaining spaces to fit the row's width */ | |
634 | for (i = len; i < 7; i++) { | |
635 | ret = trace_seq_printf(s, " "); | |
636 | if (!ret) | |
637 | return TRACE_TYPE_PARTIAL_LINE; | |
638 | } | |
0706f1c4 SR |
639 | return TRACE_TYPE_HANDLED; |
640 | } | |
641 | ||
642 | static enum print_line_t | |
643 | print_graph_duration(unsigned long long duration, struct trace_seq *s) | |
644 | { | |
645 | int ret; | |
646 | ||
647 | ret = trace_print_graph_duration(duration, s); | |
648 | if (ret != TRACE_TYPE_HANDLED) | |
649 | return ret; | |
166d3c79 FW |
650 | |
651 | ret = trace_seq_printf(s, "| "); | |
652 | if (!ret) | |
653 | return TRACE_TYPE_PARTIAL_LINE; | |
166d3c79 | 654 | |
0706f1c4 | 655 | return TRACE_TYPE_HANDLED; |
83a8df61 FW |
656 | } |
657 | ||
83a8df61 | 658 | /* Case of a leaf function on its call entry */ |
287b6e68 | 659 | static enum print_line_t |
83a8df61 | 660 | print_graph_entry_leaf(struct trace_iterator *iter, |
b91facc3 FW |
661 | struct ftrace_graph_ent_entry *entry, |
662 | struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s) | |
fb52607a | 663 | { |
2fbcdb35 | 664 | struct fgraph_data *data = iter->private; |
83a8df61 | 665 | struct ftrace_graph_ret *graph_ret; |
83a8df61 FW |
666 | struct ftrace_graph_ent *call; |
667 | unsigned long long duration; | |
fb52607a | 668 | int ret; |
1a056155 | 669 | int i; |
fb52607a | 670 | |
83a8df61 FW |
671 | graph_ret = &ret_entry->ret; |
672 | call = &entry->graph_ent; | |
673 | duration = graph_ret->rettime - graph_ret->calltime; | |
674 | ||
2fbcdb35 SR |
675 | if (data) { |
676 | int cpu = iter->cpu; | |
be1eca39 | 677 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); |
2fbcdb35 SR |
678 | |
679 | /* | |
680 | * Comments display at + 1 to depth. Since | |
681 | * this is a leaf function, keep the comments | |
682 | * equal to this depth. | |
683 | */ | |
684 | *depth = call->depth - 1; | |
685 | } | |
686 | ||
83a8df61 | 687 | /* Overhead */ |
9005f3eb FW |
688 | ret = print_graph_overhead(duration, s); |
689 | if (!ret) | |
690 | return TRACE_TYPE_PARTIAL_LINE; | |
1a056155 FW |
691 | |
692 | /* Duration */ | |
9005f3eb FW |
693 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { |
694 | ret = print_graph_duration(duration, s); | |
695 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
696 | return TRACE_TYPE_PARTIAL_LINE; | |
697 | } | |
437f24fb | 698 | |
83a8df61 FW |
699 | /* Function */ |
700 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | |
701 | ret = trace_seq_printf(s, " "); | |
702 | if (!ret) | |
703 | return TRACE_TYPE_PARTIAL_LINE; | |
704 | } | |
705 | ||
b375a11a | 706 | ret = trace_seq_printf(s, "%ps();\n", (void *)call->func); |
83a8df61 FW |
707 | if (!ret) |
708 | return TRACE_TYPE_PARTIAL_LINE; | |
709 | ||
710 | return TRACE_TYPE_HANDLED; | |
711 | } | |
712 | ||
713 | static enum print_line_t | |
2fbcdb35 SR |
714 | print_graph_entry_nested(struct trace_iterator *iter, |
715 | struct ftrace_graph_ent_entry *entry, | |
716 | struct trace_seq *s, int cpu) | |
83a8df61 | 717 | { |
83a8df61 | 718 | struct ftrace_graph_ent *call = &entry->graph_ent; |
2fbcdb35 SR |
719 | struct fgraph_data *data = iter->private; |
720 | int ret; | |
721 | int i; | |
722 | ||
723 | if (data) { | |
724 | int cpu = iter->cpu; | |
be1eca39 | 725 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); |
2fbcdb35 SR |
726 | |
727 | *depth = call->depth; | |
728 | } | |
83a8df61 FW |
729 | |
730 | /* No overhead */ | |
9005f3eb FW |
731 | ret = print_graph_overhead(-1, s); |
732 | if (!ret) | |
733 | return TRACE_TYPE_PARTIAL_LINE; | |
1a056155 | 734 | |
9005f3eb FW |
735 | /* No time */ |
736 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | |
f8b755ac FW |
737 | ret = trace_seq_printf(s, " | "); |
738 | if (!ret) | |
739 | return TRACE_TYPE_PARTIAL_LINE; | |
f8b755ac FW |
740 | } |
741 | ||
83a8df61 | 742 | /* Function */ |
287b6e68 FW |
743 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
744 | ret = trace_seq_printf(s, " "); | |
fb52607a FW |
745 | if (!ret) |
746 | return TRACE_TYPE_PARTIAL_LINE; | |
287b6e68 FW |
747 | } |
748 | ||
b375a11a | 749 | ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func); |
83a8df61 FW |
750 | if (!ret) |
751 | return TRACE_TYPE_PARTIAL_LINE; | |
752 | ||
b91facc3 FW |
753 | /* |
754 | * we already consumed the current entry to check the next one | |
755 | * and see if this is a leaf. | |
756 | */ | |
757 | return TRACE_TYPE_NO_CONSUME; | |
287b6e68 FW |
758 | } |
759 | ||
83a8df61 | 760 | static enum print_line_t |
ac5f6c96 SR |
761 | print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, |
762 | int type, unsigned long addr) | |
83a8df61 | 763 | { |
2fbcdb35 | 764 | struct fgraph_data *data = iter->private; |
83a8df61 | 765 | struct trace_entry *ent = iter->ent; |
ac5f6c96 SR |
766 | int cpu = iter->cpu; |
767 | int ret; | |
83a8df61 | 768 | |
1a056155 | 769 | /* Pid */ |
2fbcdb35 | 770 | if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE) |
9005f3eb FW |
771 | return TRACE_TYPE_PARTIAL_LINE; |
772 | ||
ac5f6c96 SR |
773 | if (type) { |
774 | /* Interrupt */ | |
775 | ret = print_graph_irq(iter, addr, type, cpu, ent->pid); | |
776 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
777 | return TRACE_TYPE_PARTIAL_LINE; | |
778 | } | |
83a8df61 | 779 | |
9005f3eb FW |
780 | /* Absolute time */ |
781 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { | |
782 | ret = print_graph_abs_time(iter->ts, s); | |
783 | if (!ret) | |
784 | return TRACE_TYPE_PARTIAL_LINE; | |
785 | } | |
786 | ||
1a056155 FW |
787 | /* Cpu */ |
788 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | |
789 | ret = print_graph_cpu(s, cpu); | |
11e84acc FW |
790 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
791 | return TRACE_TYPE_PARTIAL_LINE; | |
792 | } | |
793 | ||
794 | /* Proc */ | |
795 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | |
00a8bf85 | 796 | ret = print_graph_proc(s, ent->pid); |
11e84acc FW |
797 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
798 | return TRACE_TYPE_PARTIAL_LINE; | |
799 | ||
800 | ret = trace_seq_printf(s, " | "); | |
1a056155 FW |
801 | if (!ret) |
802 | return TRACE_TYPE_PARTIAL_LINE; | |
803 | } | |
83a8df61 | 804 | |
49ff5903 SR |
805 | /* Latency format */ |
806 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { | |
807 | ret = print_graph_lat_fmt(s, ent); | |
808 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
809 | return TRACE_TYPE_PARTIAL_LINE; | |
810 | } | |
811 | ||
ac5f6c96 SR |
812 | return 0; |
813 | } | |
814 | ||
815 | static enum print_line_t | |
816 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | |
817 | struct trace_iterator *iter) | |
818 | { | |
be1eca39 | 819 | struct fgraph_data *data = iter->private; |
ac5f6c96 SR |
820 | struct ftrace_graph_ent *call = &field->graph_ent; |
821 | struct ftrace_graph_ret_entry *leaf_ret; | |
be1eca39 JO |
822 | static enum print_line_t ret; |
823 | int cpu = iter->cpu; | |
ac5f6c96 SR |
824 | |
825 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func)) | |
826 | return TRACE_TYPE_PARTIAL_LINE; | |
827 | ||
b91facc3 FW |
828 | leaf_ret = get_return_for_leaf(iter, field); |
829 | if (leaf_ret) | |
be1eca39 | 830 | ret = print_graph_entry_leaf(iter, field, leaf_ret, s); |
83a8df61 | 831 | else |
be1eca39 | 832 | ret = print_graph_entry_nested(iter, field, s, cpu); |
83a8df61 | 833 | |
be1eca39 JO |
834 | if (data) { |
835 | /* | |
836 | * If we failed to write our output, then we need to make | |
837 | * note of it. Because we already consumed our entry. | |
838 | */ | |
839 | if (s->full) { | |
840 | data->failed = 1; | |
841 | data->cpu = cpu; | |
842 | } else | |
843 | data->failed = 0; | |
844 | } | |
845 | ||
846 | return ret; | |
83a8df61 FW |
847 | } |
848 | ||
287b6e68 FW |
849 | static enum print_line_t |
850 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |
9005f3eb | 851 | struct trace_entry *ent, struct trace_iterator *iter) |
287b6e68 | 852 | { |
83a8df61 | 853 | unsigned long long duration = trace->rettime - trace->calltime; |
2fbcdb35 SR |
854 | struct fgraph_data *data = iter->private; |
855 | pid_t pid = ent->pid; | |
856 | int cpu = iter->cpu; | |
857 | int ret; | |
858 | int i; | |
859 | ||
860 | if (data) { | |
861 | int cpu = iter->cpu; | |
be1eca39 | 862 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); |
2fbcdb35 SR |
863 | |
864 | /* | |
865 | * Comments display at + 1 to depth. This is the | |
866 | * return from a function, we now want the comments | |
867 | * to display at the same level of the bracket. | |
868 | */ | |
869 | *depth = trace->depth - 1; | |
870 | } | |
287b6e68 | 871 | |
ac5f6c96 | 872 | if (print_graph_prologue(iter, s, 0, 0)) |
437f24fb SR |
873 | return TRACE_TYPE_PARTIAL_LINE; |
874 | ||
83a8df61 | 875 | /* Overhead */ |
9005f3eb FW |
876 | ret = print_graph_overhead(duration, s); |
877 | if (!ret) | |
878 | return TRACE_TYPE_PARTIAL_LINE; | |
1a056155 FW |
879 | |
880 | /* Duration */ | |
9005f3eb FW |
881 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { |
882 | ret = print_graph_duration(duration, s); | |
883 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
884 | return TRACE_TYPE_PARTIAL_LINE; | |
885 | } | |
83a8df61 FW |
886 | |
887 | /* Closing brace */ | |
287b6e68 FW |
888 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { |
889 | ret = trace_seq_printf(s, " "); | |
fb52607a FW |
890 | if (!ret) |
891 | return TRACE_TYPE_PARTIAL_LINE; | |
287b6e68 FW |
892 | } |
893 | ||
1a056155 | 894 | ret = trace_seq_printf(s, "}\n"); |
287b6e68 FW |
895 | if (!ret) |
896 | return TRACE_TYPE_PARTIAL_LINE; | |
fb52607a | 897 | |
83a8df61 | 898 | /* Overrun */ |
287b6e68 FW |
899 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) { |
900 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", | |
901 | trace->overrun); | |
fb52607a FW |
902 | if (!ret) |
903 | return TRACE_TYPE_PARTIAL_LINE; | |
287b6e68 | 904 | } |
f8b755ac | 905 | |
d1f9cbd7 | 906 | ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid); |
f8b755ac FW |
907 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
908 | return TRACE_TYPE_PARTIAL_LINE; | |
909 | ||
287b6e68 FW |
910 | return TRACE_TYPE_HANDLED; |
911 | } | |
912 | ||
1fd8f2a3 | 913 | static enum print_line_t |
5087f8d2 SR |
914 | print_graph_comment(struct trace_seq *s, struct trace_entry *ent, |
915 | struct trace_iterator *iter) | |
1fd8f2a3 | 916 | { |
5087f8d2 | 917 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
2fbcdb35 | 918 | struct fgraph_data *data = iter->private; |
5087f8d2 | 919 | struct trace_event *event; |
2fbcdb35 | 920 | int depth = 0; |
1fd8f2a3 | 921 | int ret; |
2fbcdb35 SR |
922 | int i; |
923 | ||
924 | if (data) | |
be1eca39 | 925 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; |
9005f3eb | 926 | |
ac5f6c96 | 927 | if (print_graph_prologue(iter, s, 0, 0)) |
d1f9cbd7 FW |
928 | return TRACE_TYPE_PARTIAL_LINE; |
929 | ||
1fd8f2a3 | 930 | /* No overhead */ |
9005f3eb FW |
931 | ret = print_graph_overhead(-1, s); |
932 | if (!ret) | |
933 | return TRACE_TYPE_PARTIAL_LINE; | |
934 | ||
935 | /* No time */ | |
936 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | |
937 | ret = trace_seq_printf(s, " | "); | |
1fd8f2a3 FW |
938 | if (!ret) |
939 | return TRACE_TYPE_PARTIAL_LINE; | |
940 | } | |
941 | ||
1fd8f2a3 | 942 | /* Indentation */ |
2fbcdb35 SR |
943 | if (depth > 0) |
944 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { | |
1fd8f2a3 FW |
945 | ret = trace_seq_printf(s, " "); |
946 | if (!ret) | |
947 | return TRACE_TYPE_PARTIAL_LINE; | |
948 | } | |
949 | ||
950 | /* The comment */ | |
769b0441 FW |
951 | ret = trace_seq_printf(s, "/* "); |
952 | if (!ret) | |
953 | return TRACE_TYPE_PARTIAL_LINE; | |
954 | ||
5087f8d2 SR |
955 | switch (iter->ent->type) { |
956 | case TRACE_BPRINT: | |
957 | ret = trace_print_bprintk_msg_only(iter); | |
958 | if (ret != TRACE_TYPE_HANDLED) | |
959 | return ret; | |
960 | break; | |
961 | case TRACE_PRINT: | |
962 | ret = trace_print_printk_msg_only(iter); | |
963 | if (ret != TRACE_TYPE_HANDLED) | |
964 | return ret; | |
965 | break; | |
966 | default: | |
967 | event = ftrace_find_event(ent->type); | |
968 | if (!event) | |
969 | return TRACE_TYPE_UNHANDLED; | |
970 | ||
971 | ret = event->trace(iter, sym_flags); | |
972 | if (ret != TRACE_TYPE_HANDLED) | |
973 | return ret; | |
974 | } | |
1fd8f2a3 | 975 | |
412d0bb5 FW |
976 | /* Strip ending newline */ |
977 | if (s->buffer[s->len - 1] == '\n') { | |
978 | s->buffer[s->len - 1] = '\0'; | |
979 | s->len--; | |
980 | } | |
981 | ||
1fd8f2a3 FW |
982 | ret = trace_seq_printf(s, " */\n"); |
983 | if (!ret) | |
984 | return TRACE_TYPE_PARTIAL_LINE; | |
985 | ||
986 | return TRACE_TYPE_HANDLED; | |
987 | } | |
988 | ||
989 | ||
287b6e68 FW |
/*
 * Top-level print_line callback for the function graph tracer:
 * dispatch one trace entry to the entry/return/comment printer.
 *
 * The data->failed / ->ignore dance replays an entry whose previous
 * output attempt overflowed the seq buffer; the exact statement order
 * (swap iter->cpu, print, mark ignore, restore iter->cpu) matters.
 */
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	/*
	 * This CPU's next event was already emitted by a replay below
	 * (with TRACE_TYPE_NO_CONSUME); swallow it once and clear the flag.
	 */
	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		/* temporarily impersonate the CPU the saved entry came from */
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			/* replayed another CPU's event: skip it when it comes up */
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter);
	}
	default:
		/* anything else is rendered as an inline comment */
		return print_graph_comment(s, entry, iter);
	}

	return TRACE_TYPE_HANDLED;
}
1045 | ||
49ff5903 SR |
1046 | static void print_lat_header(struct seq_file *s) |
1047 | { | |
1048 | static const char spaces[] = " " /* 16 spaces */ | |
1049 | " " /* 4 spaces */ | |
1050 | " "; /* 17 spaces */ | |
1051 | int size = 0; | |
1052 | ||
1053 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | |
1054 | size += 16; | |
1055 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | |
1056 | size += 4; | |
1057 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | |
1058 | size += 17; | |
1059 | ||
1060 | seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces); | |
1061 | seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces); | |
1062 | seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces); | |
1063 | seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces); | |
637e7e86 SR |
1064 | seq_printf(s, "#%.*s||| / _-=> lock-depth \n", size, spaces); |
1065 | seq_printf(s, "#%.*s|||| / \n", size, spaces); | |
49ff5903 SR |
1066 | } |
1067 | ||
decbec38 FW |
1068 | static void print_graph_headers(struct seq_file *s) |
1069 | { | |
49ff5903 SR |
1070 | int lat = trace_flags & TRACE_ITER_LATENCY_FMT; |
1071 | ||
1072 | if (lat) | |
1073 | print_lat_header(s); | |
1074 | ||
decbec38 | 1075 | /* 1st line */ |
49ff5903 | 1076 | seq_printf(s, "#"); |
9005f3eb FW |
1077 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) |
1078 | seq_printf(s, " TIME "); | |
decbec38 | 1079 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) |
49ff5903 | 1080 | seq_printf(s, " CPU"); |
decbec38 | 1081 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) |
49ff5903 SR |
1082 | seq_printf(s, " TASK/PID "); |
1083 | if (lat) | |
637e7e86 | 1084 | seq_printf(s, "|||||"); |
9005f3eb FW |
1085 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) |
1086 | seq_printf(s, " DURATION "); | |
1087 | seq_printf(s, " FUNCTION CALLS\n"); | |
decbec38 FW |
1088 | |
1089 | /* 2nd line */ | |
49ff5903 | 1090 | seq_printf(s, "#"); |
9005f3eb FW |
1091 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) |
1092 | seq_printf(s, " | "); | |
decbec38 | 1093 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) |
49ff5903 | 1094 | seq_printf(s, " | "); |
decbec38 | 1095 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) |
49ff5903 SR |
1096 | seq_printf(s, " | | "); |
1097 | if (lat) | |
637e7e86 | 1098 | seq_printf(s, "|||||"); |
9005f3eb FW |
1099 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) |
1100 | seq_printf(s, " | | "); | |
1101 | seq_printf(s, " | | | |\n"); | |
decbec38 | 1102 | } |
9005f3eb FW |
1103 | |
1104 | static void graph_trace_open(struct trace_iterator *iter) | |
1105 | { | |
2fbcdb35 | 1106 | /* pid and depth on the last trace processed */ |
be1eca39 | 1107 | struct fgraph_data *data; |
9005f3eb FW |
1108 | int cpu; |
1109 | ||
be1eca39 JO |
1110 | iter->private = NULL; |
1111 | ||
1112 | data = kzalloc(sizeof(*data), GFP_KERNEL); | |
2fbcdb35 | 1113 | if (!data) |
be1eca39 JO |
1114 | goto out_err; |
1115 | ||
1116 | data->cpu_data = alloc_percpu(struct fgraph_cpu_data); | |
1117 | if (!data->cpu_data) | |
1118 | goto out_err_free; | |
1119 | ||
1120 | for_each_possible_cpu(cpu) { | |
1121 | pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); | |
1122 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); | |
1123 | int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); | |
1124 | *pid = -1; | |
1125 | *depth = 0; | |
1126 | *ignore = 0; | |
1127 | } | |
9005f3eb | 1128 | |
2fbcdb35 | 1129 | iter->private = data; |
be1eca39 JO |
1130 | |
1131 | return; | |
1132 | ||
1133 | out_err_free: | |
1134 | kfree(data); | |
1135 | out_err: | |
1136 | pr_warning("function graph tracer: not enough memory\n"); | |
9005f3eb FW |
1137 | } |
1138 | ||
1139 | static void graph_trace_close(struct trace_iterator *iter) | |
1140 | { | |
be1eca39 JO |
1141 | struct fgraph_data *data = iter->private; |
1142 | ||
1143 | if (data) { | |
1144 | free_percpu(data->cpu_data); | |
1145 | kfree(data); | |
1146 | } | |
9005f3eb FW |
1147 | } |
1148 | ||
/* Tracer registration record for the function graph tracer. */
static struct tracer graph_trace __read_mostly = {
	.name = "function_graph",
	/* same open/close callbacks serve both the file and the pipe */
	.open = graph_trace_open,
	.pipe_open = graph_trace_open,
	.close = graph_trace_close,
	.pipe_close = graph_trace_close,
	.wait_pipe = poll_wait_pipe,
	.init = graph_trace_init,
	.reset = graph_trace_reset,
	.print_line = print_graph_function,
	.print_header = print_graph_headers,
	/* exposes the funcgraph-* display options */
	.flags = &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function_graph,
#endif
};
1165 | ||
static __init int init_graph_trace(void)
{
	/*
	 * snprintf(NULL, 0, ...) returns the formatted length without
	 * writing anything: the character width of the largest CPU id,
	 * used to size the CPU column.
	 */
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	return register_tracer(&graph_trace);
}

/* register at device-initcall time, once tracing infrastructure is up */
device_initcall(init_graph_trace);