/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

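/* The trace_array that graph entry/return events are recorded into */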
static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack of
	 * the current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering out
	 * specific functions, the index is made negative by subtracting a
	 * huge value (FTRACE_NOTRACE_DEPTH), so that when ftrace sees a
	 * negative index it will ignore the record. The index is recovered
	 * when returning from the filtered function by adding
	 * FTRACE_NOTRACE_DEPTH back, and recording then continues normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased
	 * in this function. So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that we just returned from a
	 * notrace'd function. Recover the index to get the original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

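/*
 * Reserve a TRACE_GRAPH_ENT event on the ring buffer, fill it with the
 * function entry record and commit it (unless the event filter discards
 * it). Returns 0 if tracing is disabled on this CPU or no event could
 * be reserved, 1 otherwise.
 */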
int __trace_graph_entry(struct trace_array *tr,
			struct ftrace_graph_ent *trace,
			unsigned long flags,
			int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

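/*
 * Entry hook registered with the function graph infrastructure. Applies
 * the task, depth, irq and notrace filters, then records the entry with
 * interrupts disabled and the per-CPU disabled counter raised to guard
 * against recursion.
 */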
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it when it is nested in an enabled function, or is itself enabled. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of the ret stack negative to indicate that it
	 * should ignore further functions. But it needs its own ret stack
	 * entry to recover the original index in order to continue tracing
	 * after returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

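/*
 * Inject a matched entry/return pair for a single address with zero
 * duration, so that a lone function hit can be shown in graph format.
 */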
static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
			  struct ftrace_graph_ret *trace,
			  unsigned long flags,
			  int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

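/*
 * Return hook registered with the function graph infrastructure; the
 * counterpart of trace_graph_entry() for TRACE_GRAPH_RET events.
 */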
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

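/*
 * Tracer init/teardown. When tracing_thresh is set, register the
 * threshold-aware callbacks: entries are not recorded at call time, and
 * returns are recorded only when the function ran at least as long as
 * the threshold.
 */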
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

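/*
 * Peek at the next event to check whether the current entry is a leaf
 * (immediately followed by its own return event). If it is, return the
 * matching return entry and advance the iterator past it; otherwise
 * return NULL.
 */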
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {
		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str);
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++)
		trace_seq_putc(s, ' ');
}

static void
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of execution time to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the return from the
	 * irq entry. Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

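/*
 * Print one function entry, either as a collapsed leaf ("func();") or
 * as an open nested call ("func() {"). If the output overflows the seq
 * buffer, the already-consumed entry is stashed in iter->private so it
 * can be printed again on the next call.
 */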
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
		  struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function; we now want the comments
		 * to display at the same level as the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}

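/*
 * Main print_line callback of the graph tracer. Dispatches on the event
 * type; anything that is not a graph entry/return (and not a stack or
 * function event) is printed as a comment within the call graph.
 */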
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

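/*
 * Handlers for the max_graph_depth tracefs file. A depth of 0 (the
 * default) means no limit; see the max_depth check in trace_graph_entry().
 */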
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);