/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};

#define TRACE_GRAPH_INDENT      2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
#define TRACE_GRAPH_PRINT_IRQS          0x40

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purposes) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display overhead? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns and proc by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
        .opts = trace_opts
};

static struct trace_array *graph_array;


/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer)
{
        unsigned long long calltime;
        int index;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        barrier();
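        /*
         * curr_ret_stack is updated before the entry below is filled in,
         * and barrier() keeps the compiler from reordering the stores:
         * an interrupt arriving here pushes onto the next slot instead
         * of clobbering this partially written one.
         */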
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
        current->ret_stack[index].fp = frame_pointer;
        *depth = index;

        return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        if (unlikely(index < 0)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the placeholder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, building x86_32 with -Os (optimize for size) makes
         * the latest gcc do the above.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        ftrace_graph_return(&trace);
        barrier();
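        /*
         * Make sure everything that reads the ret_stack entry is done
         * before the decrement below releases the slot; once
         * curr_ret_stack drops, a nested push may reuse the entry.
         */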
        current->curr_ret_stack--;

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}

int __trace_graph_entry(struct trace_array *tr,
                        struct ftrace_graph_ent *trace,
                        unsigned long flags,
                        int pc)
{
        struct ftrace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ent_entry *entry;

        if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                return 0;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
        if (!filter_current_check_discard(buffer, call, entry, event))
                ring_buffer_unlock_commit(buffer, event);

        return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
        if (!ftrace_graph_skip_irqs)
                return 0;

        return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (!ftrace_trace_task(current))
                return 0;

        /* Trace it when it is nested in a traced function, or is itself an enabled one. */
        if (!(trace->depth || ftrace_graph_addr(trace->func)) ||
            ftrace_graph_ignore_irqs())
                return 0;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

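/*
 * With a duration threshold set, entry events are not recorded (just
 * report success so the return handler still fires); filtering can only
 * happen on the return side, once the elapsed time is known.
 */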
int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
        if (tracing_thresh)
                return 1;
        else
                return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
                unsigned long ip, unsigned long flags, int pc)
{
        u64 time = trace_clock_local();
        struct ftrace_graph_ent ent = {
                .func  = ip,
                .depth = 0,
        };
        struct ftrace_graph_ret ret = {
                .func     = ip,
                .depth    = 0,
                .calltime = time,
                .rettime  = time,
        };

        __trace_graph_entry(tr, &ent, flags, pc);
        __trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
                unsigned long ip, unsigned long parent_ip,
                unsigned long flags, int pc)
{
        __trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
                          struct ftrace_graph_ret *trace,
                          unsigned long flags,
                          int pc)
{
        struct ftrace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ret_entry *entry;

        if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                return;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
        if (!filter_current_check_discard(buffer, call, entry, event))
                ring_buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */

        smp_mb();
}

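/*
 * Drop return events whose duration is below tracing_thresh; only
 * functions that ran for at least the threshold get recorded.
 */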
void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&trace_graph_thresh_return,
                                            &trace_graph_thresh_entry);
        else
                ret = register_ftrace_graph(&trace_graph_return,
                                            &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

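/* Width in characters of the largest possible CPU number, set at init */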
static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
        int ret;

        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int ret;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}


static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        if (!trace_seq_putc(s, ' '))
                return 0;

        return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;
        int ret;

        if (!data)
                return TRACE_TYPE_HANDLED;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return TRACE_TYPE_HANDLED;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
        ret = trace_seq_printf(s,
                " ------------------------------------------\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_cpu(s, cpu);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, prev_pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, " => ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s,
                "\n ------------------------------------------\n\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

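/*
 * Peek at the event following @curr: if it is the matching
 * TRACE_GRAPH_RET for the same pid and function, @curr is a leaf call
 * and the return entry is handed back so both can be printed as a
 * single line; otherwise NULL is returned.
 */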
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {

                ring_iter = iter->buffer_iter[iter->cpu];

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->tr->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, then
                         * we only care about what type it is. Otherwise we can
                         * safely copy the entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}

/* Signal an overhead of execution time to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s,
                     u32 flags)
{
        /* If the duration is not displayed, we don't need anything */
        if (!(flags & TRACE_GRAPH_PRINT_DURATION))
                return 1;

        /* Non-nested entry or return */
        if (duration == -1)
                return trace_seq_printf(s, "  ");

        if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
                /* Duration exceeded 100 usecs */
                if (duration > 100000ULL)
                        return trace_seq_printf(s, "! ");

                /* Duration exceeded 10 usecs */
                if (duration > 10000ULL)
                        return trace_seq_printf(s, "+ ");
        }

        return trace_seq_printf(s, "  ");
}

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        return trace_seq_printf(s, "%5lu.%06lu |  ",
                                (unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        int ret;
        struct trace_seq *s = &iter->seq;

        if (addr < (unsigned long)__irqentry_text_start ||
            addr >= (unsigned long)__irqentry_text_end)
                return TRACE_TYPE_UNHANDLED;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s, flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type == TRACE_GRAPH_ENT)
                ret = trace_seq_printf(s, "==========>");
        else
                ret = trace_seq_printf(s, "<==========");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Don't close the duration column if we don't have one */
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                trace_seq_printf(s, " |");
        ret = trace_seq_printf(s, "\n");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char msecs_str[21];
        char nsecs_str[5];
        int ret, len;
        int i;

        sprintf(msecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        ret = trace_seq_printf(s, "%s", msecs_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        len = strlen(msecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                ret = trace_seq_printf(s, ".%s", nsecs_str);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
                len += strlen(nsecs_str);
        }

        ret = trace_seq_printf(s, " us ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 7; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        int ret;

        ret = trace_print_graph_duration(duration, s);
        if (ret != TRACE_TYPE_HANDLED)
                return ret;

        ret = trace_seq_printf(s, "|  ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *entry,
                struct ftrace_graph_ret_entry *ret_entry,
                struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int ret;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead */
        ret = print_graph_overhead(duration, s, flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (flags & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        int ret;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s, flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (flags & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * We already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

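/*
 * Print the columns shared by entry, return and comment lines: the
 * pid-change box, irq entry/exit markers, absolute time, CPU, proc
 * and latency-format fields, as selected by @flags.
 */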
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        int cpu = iter->cpu;
        int ret;

        /* Pid */
        if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type) {
                /* Interrupt */
                ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;

                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Latency format */
        if (trace_flags & TRACE_ITER_LATENCY_FMT) {
                ret = print_graph_lat_fmt(s, ent);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

        /*
         * We are inside the irq code, and this is the return entry.
         * Let's not trace it and clear the entry depth, since
         * we are out of irq code.
         *
         * This condition ensures that we 'leave the irq code' once
         * we are out of the entry depth. Thus protecting us from
         * the RETURN entry loss.
         */
        if (*depth_irq >= depth) {
                *depth_irq = -1;
                return 1;
        }

        /*
         * We are inside the irq code, and this is not the entry.
         */
        return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                  struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
                return TRACE_TYPE_PARTIAL_LINE;

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * a note of it, because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int ret;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. This is the
                 * return from a function, we now want the comments
                 * to display at the same level of the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        if (print_graph_prologue(iter, s, 0, 0, flags))
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overhead */
        ret = print_graph_overhead(duration, s, flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (flags & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost. Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name.
         */
        if (func_match) {
                ret = trace_seq_printf(s, "}\n");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        } else {
                ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
                ret = trace_seq_printf(s, " (Overruns: %lu)\n",
                                       trace->overrun);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                              cpu, pid, flags);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

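/*
 * Print a non-graph event (e.g. trace_printk() output) as a C-style
 * comment, indented to the depth of the function currently being
 * traced on this CPU.
 */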
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        if (print_graph_prologue(iter, s, 0, 0, flags))
                return TRACE_TYPE_PARTIAL_LINE;

        /* No overhead */
        ret = print_graph_overhead(-1, s, flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (flags & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
                        ret = trace_seq_printf(s, " ");
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }

        /* The comment */
        ret = trace_seq_printf(s, "/* ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        switch (iter->ent->type) {
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        /* Strip ending newline */
        if (s->buffer[s->len - 1] == '\n') {
                s->buffer[s->len - 1] = '\0';
                s->len--;
        }

        ret = trace_seq_printf(s, " */\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

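/*
 * Main print_line worker: dispatch the current event to the entry,
 * return or comment printer, replaying a saved entry first if the
 * previous line overflowed the seq buffer.
 */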
enum print_line_t
__print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the missing entry which would never go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * it can be safely saved on the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and functions as comments */
                return TRACE_TYPE_UNHANDLED;

        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return __print_graph_function_flags(iter, tracer_flags.val);
}

enum print_line_t print_graph_function_flags(struct trace_iterator *iter,
                                             u32 flags)
{
        if (trace_flags & TRACE_ITER_LATENCY_FMT)
                flags |= TRACE_GRAPH_PRINT_DURATION;
        else
                flags |= TRACE_GRAPH_PRINT_ABS_TIME;

        return __print_graph_function_flags(iter, flags);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
                           struct trace_event *event)
{
        return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                " /* 16 spaces */
                "    "                                  /* 4 spaces */
                "                 ";                    /* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| / _-=> lock-depth      \n", size, spaces);
        seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_printf(s, "#");
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, "     TIME       ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "  TASK/PID       ");
        if (lat)
                seq_printf(s, "|||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, "  DURATION   ");
        seq_printf(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_printf(s, "#");
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, "      |         ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, " |  ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "   |    |        ");
        if (lat)
                seq_printf(s, "|||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, "   |   |      ");
        seq_printf(s, "               |   |   |   |\n");
}

void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        struct trace_iterator *iter = s->private;

        if (trace_flags & TRACE_ITER_LATENCY_FMT) {
                /* print nothing if the buffers are empty */
                if (trace_empty(iter))
                        return;

                print_trace_header(s, iter);
                flags |= TRACE_GRAPH_PRINT_DURATION;
        } else
                flags |= TRACE_GRAPH_PRINT_ABS_TIME;

        __print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        int cpu;

        iter->private = NULL;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

out_err_free:
        kfree(data);
out_err:
        pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}

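/* Keep ftrace_graph_skip_irqs in sync with the funcgraph-irqs option */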
static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_GRAPH_PRINT_IRQS)
                ftrace_graph_skip_irqs = !set;

        return 0;
}

static struct trace_event_functions graph_functions = {
        .trace          = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
        .type           = TRACE_GRAPH_ENT,
        .funcs          = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
        .type           = TRACE_GRAPH_RET,
        .funcs          = &graph_functions
};

static struct tracer graph_trace __read_mostly = {
        .name           = "function_graph",
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .wait_pipe      = poll_wait_pipe,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
        .set_flag       = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};

static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        if (!register_ftrace_event(&graph_trace_entry_event)) {
                pr_warning("Warning: could not register graph trace events\n");
                return 1;
        }

        if (!register_ftrace_event(&graph_trace_ret_event)) {
                pr_warning("Warning: could not register graph trace events\n");
                return 1;
        }

        return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);