// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
        return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
        kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};

#define TRACE_GRAPH_INDENT      2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purpose) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU ? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display Overhead ? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        /* Display function name after trailing } */
        { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
        /* Include sleep time (scheduled out) between entry and return */
        { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
        /* Include time within nested functions */
        { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
        { } /* Empty entry */
};
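
/*
 * Each option above maps to a toggle in the tracefs trace_options file.
 * Illustrative (not compiled) shell usage, assuming the usual tracefs
 * mount point at /sys/kernel/debug/tracing:
 *
 *	echo funcgraph-proc > trace_options	# show the TASK/PID column
 *	echo nofuncgraph-irqs > trace_options	# hide functions hit in irqs
 */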

static struct tracer_flags tracer_flags = {
        /* Don't display overruns, proc, or tail by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
               TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
        .opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the values
 * below are used by print_graph_irq and others to fill in space in
 * the DURATION column.
 */
enum {
        FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer, unsigned long *retp)
{
        unsigned long long calltime;
        int index;

        if (unlikely(ftrace_graph_is_dead()))
                return -EBUSY;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        /*
         * curr_ret_stack is an index into the current task's ftrace
         * return stack.  Its value is in [0, FTRACE_RETFUNC_DEPTH)
         * while the function graph tracer is in use.  To support
         * filtering out specific functions, the index is made negative
         * by subtracting a huge value (FTRACE_NOTRACE_DEPTH), so that
         * whenever ftrace sees a negative index it ignores the record.
         * The index is recovered when returning from the filtered
         * function by adding FTRACE_NOTRACE_DEPTH back, after which
         * functions are recorded normally again.
         *
         * curr_ret_stack is initialized to -1 and gets incremented in
         * this function.  So it can be less than -1 only if the current
         * function was filtered out via ftrace_graph_notrace_addr(),
         * which the user can set through the set_graph_notrace file in
         * tracefs.
         */
        if (current->curr_ret_stack < -1)
                return -EBUSY;

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        if (ftrace_graph_notrace_addr(func))
                current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
        current->ret_stack[index].retp = retp;
#endif
        *depth = current->curr_ret_stack;

        return 0;
}
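
/*
 * Worked example of the set_graph_notrace bookkeeping above (purely
 * illustrative; assumes FTRACE_NOTRACE_DEPTH is a large offset such as
 * 65536): entering a filtered function while curr_ret_stack is 1 sets
 * index = 2 and leaves curr_ret_stack = 2 - 65536 = -65534.  Every
 * nested entry then sees curr_ret_stack < -1 and bails out with -EBUSY,
 * so nothing below the filtered function is recorded.  The matching
 * return path adds FTRACE_NOTRACE_DEPTH back, restoring the index so
 * tracing resumes where it left off.
 */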

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        /*
         * A negative index here means that it's just returned from a
         * notrace'd function.  Recover the index to get the original
         * return address.  See ftrace_push_return_trace().
         *
         * TODO: Need to check whether the stack gets corrupted.
         */
        if (index < 0)
                index += FTRACE_NOTRACE_DEPTH;

        if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the place holder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 optimized for size (-Os) makes the latest
         * gcc do the above.
         *
         * Note, -mfentry does not use frame pointers, and this test
         * is not needed if CC_USING_FENTRY is set.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        barrier();
        current->curr_ret_stack--;
        /*
         * The curr_ret_stack can be less than -1 only if it was
         * filtered out and it's about to return from the function.
         * Recover the index and continue to trace normal functions.
         */
        if (current->curr_ret_stack < -1) {
                current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
                return ret;
        }

        /*
         * The trace should run after decrementing the ret counter
         * in case an interrupt were to come in. We don't want to
         * lose the interrupt if max_depth is set.
         */
        ftrace_graph_return(&trace);

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}
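
/*
 * ftrace_return_to_handler() is the C half of the arch return
 * trampoline: the entry hook rewrites a traced function's return
 * address to point at return_to_handler, which calls here to emit the
 * exit event and learn where to really return.  A rough sketch of the
 * arch side (illustrative pseudo-assembly, not any particular port):
 *
 *	return_to_handler:
 *		save return-value / scratch registers
 *		call ftrace_return_to_handler	; yields original ret addr
 *		stash result in a scratch register
 *		restore registers
 *		jump to the original return address
 */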

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp)
{
        int index = task->curr_ret_stack;
        int i;

        if (ret != (unsigned long)return_to_handler)
                return ret;

        if (index < -1)
                index += FTRACE_NOTRACE_DEPTH;

        if (index < 0)
                return ret;

        for (i = 0; i <= index; i++)
                if (task->ret_stack[i].retp == retp)
                        return task->ret_stack[i].ret;

        return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp)
{
        int task_idx;

        if (ret != (unsigned long)return_to_handler)
                return ret;

        task_idx = task->curr_ret_stack;

        if (!task->ret_stack || task_idx < *idx)
                return ret;

        task_idx -= *idx;
        (*idx)++;

        return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */

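/*
 * Typical use from a stack unwinder (a minimal sketch; the local names
 * are illustrative).  'graph_idx' must start at zero and persist across
 * calls while walking a single stack:
 *
 *	int graph_idx = 0;
 *	...
 *	// for each frame found on the stack:
 *	addr = ftrace_graph_ret_addr(task, &graph_idx, addr,
 *				     (unsigned long *)ret_addr_p);
 */
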
int __trace_graph_entry(struct trace_array *tr,
                        struct ftrace_graph_ent *trace,
                        unsigned long flags,
                        int pc)
{
        struct trace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ent_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit_nostack(buffer, event);

        return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
        if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
                return 0;

        return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (!ftrace_trace_task(tr))
                return 0;

        if (ftrace_graph_ignore_func(trace))
                return 0;

        if (ftrace_graph_ignore_irqs())
                return 0;

        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions.  But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        /*
         * Stop here if tracing_threshold is set. We only write function return
         * events to the ring buffer.
         */
        if (tracing_thresh)
                return 1;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
                       unsigned long ip, unsigned long flags, int pc)
{
        u64 time = trace_clock_local();
        struct ftrace_graph_ent ent = {
                .func  = ip,
                .depth = 0,
        };
        struct ftrace_graph_ret ret = {
                .func     = ip,
                .depth    = 0,
                .calltime = time,
                .rettime  = time,
        };

        __trace_graph_entry(tr, &ent, flags, pc);
        __trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
                     unsigned long ip, unsigned long parent_ip,
                     unsigned long flags, int pc)
{
        __trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
                          struct ftrace_graph_ret *trace,
                          unsigned long flags,
                          int pc)
{
        struct trace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ret_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */

        smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace);
}

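/*
 * With tracing_thresh set, only returns slower than the threshold are
 * reported.  Illustrative (not compiled) tracefs usage, assuming the
 * usual mount point:
 *
 *	echo 100 > /sys/kernel/debug/tracing/tracing_thresh
 *	echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *
 * would record only functions that took longer than 100 usecs.
 */
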
static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&trace_graph_thresh_return,
                                            &trace_graph_entry);
        else
                ret = register_ftrace_graph(&trace_graph_return,
                                            &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
        graph_trace_reset(tr);
        return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%s-%s", comm, pid_str);

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++)
                trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        trace_seq_putc(s, ' ');
        trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;

        if (!data)
                return;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
        trace_seq_puts(s, " ------------------------------------------\n");
        print_graph_cpu(s, cpu);
        print_graph_proc(s, prev_pid);
        trace_seq_puts(s, " => ");
        print_graph_proc(s, pid);
        trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                    struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {

                ring_iter = trace_buffer_iter(iter, iter->cpu);

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, then
                         * we only care about what type it is. Otherwise we can
                         * safely copy the entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        trace_seq_printf(s, "%5lu.%06lu |  ",
                         (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        struct trace_array *tr = iter->tr;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;

        if (addr < (unsigned long)__irqentry_text_start ||
            addr >= (unsigned long)__irqentry_text_end)
                return;

        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
                /* Absolute time */
                if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                        print_graph_abs_time(iter->ts, s);

                /* Cpu */
                if (flags & TRACE_GRAPH_PRINT_CPU)
                        print_graph_cpu(s, cpu);

                /* Proc */
                if (flags & TRACE_GRAPH_PRINT_PROC) {
                        print_graph_proc(s, pid);
                        trace_seq_puts(s, " | ");
                }

                /* Latency format */
                if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                        print_graph_lat_fmt(s, ent);
        }

        /* No overhead */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

        if (type == TRACE_GRAPH_ENT)
                trace_seq_puts(s, "==========>");
        else
                trace_seq_puts(s, "<==========");

        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
        trace_seq_putc(s, '\n');
}

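/*
 * An illustrative sample of the irq markers this produces (exact
 * spacing depends on which options are enabled):
 *
 *	 1)   ==========> |
 *	 1)               |  smp_apic_timer_interrupt() {
 *	...
 *	 1)   <========== |
 */
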
void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char usecs_str[21];
        char nsecs_str[5];
        int len;
        int i;

        sprintf(usecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        trace_seq_printf(s, "%s", usecs_str);

        len = strlen(usecs_str);

        /* Print nsecs (we don't want to exceed 7 numbers) */
        if (len < 7) {
                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                trace_seq_printf(s, ".%s", nsecs_str);
                len += strlen(nsecs_str) + 1;
        }

        trace_seq_puts(s, " us ");

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 8; i++)
                trace_seq_putc(s, ' ');
}

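/*
 * Formatting example for the helper above: a duration of 3251 ns prints
 * as "3.251 us " padded to the fixed 8-character column; once the usecs
 * part reaches seven digits the nsec fraction is dropped to preserve
 * the column width.
 */
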
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags)
{
        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
            !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* No real data, just filling the column with spaces */
        switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
        case FLAGS_FILL_FULL:
                trace_seq_puts(s, "              |  ");
                return;
        case FLAGS_FILL_START:
                trace_seq_puts(s, "  ");
                return;
        case FLAGS_FILL_END:
                trace_seq_puts(s, " |");
                return;
        }

        /* Signal an overhead of time execution to the output */
        if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
                trace_seq_printf(s, "%c ", trace_find_mark(duration));
        else
                trace_seq_puts(s, "  ");

        trace_print_graph_duration(duration, s);
        trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                       struct ftrace_graph_ent_entry *entry,
                       struct ftrace_graph_ret_entry *ret_entry,
                       struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /* If a graph tracer ignored set_graph_notrace */
                if (call->depth < -1)
                        call->depth += FTRACE_NOTRACE_DEPTH;

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(call->depth < 0))
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps();\n", (void *)call->func);

        return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                /* If a graph tracer ignored set_graph_notrace */
                if (call->depth < -1)
                        call->depth += FTRACE_NOTRACE_DEPTH;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(call->depth < 0))
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps() {\n", (void *)call->func);

        if (trace_seq_has_overflowed(s))
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * we already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        struct trace_array *tr = iter->tr;
        int cpu = iter->cpu;

        /* Pid */
        verif_pid(s, ent->pid, cpu, data);

        if (type)
                /* Interrupt */
                print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                print_graph_abs_time(iter->ts, s);

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU)
                print_graph_cpu(s, cpu);

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                print_graph_proc(s, ent->pid);
                trace_seq_puts(s, " | ");
        }

        /* Latency format */
        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                print_graph_lat_fmt(s, ent);

        return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

        /*
         * We are inside the irq code, and this is returning entry.
         * Let's not trace it and clear the entry depth, since
         * we are out of irq code.
         *
         * This condition ensures that we 'leave the irq code' once
         * we are out of the entry depth. Thus protecting us from
         * the RETURN entry loss.
         */
        if (*depth_irq >= depth) {
                *depth_irq = -1;
                return 1;
        }

        /*
         * We are inside the irq code, and this is not the entry.
         */
        return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                  struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        static enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * note of it. Because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. This is the
                 * return from a function, we now want the comments
                 * to display at the same level of the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(trace->depth < 0)) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        print_graph_prologue(iter, s, 0, 0, flags);

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost. Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name. Always do
         * that if the funcgraph-tail option is enabled.
         */
        if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
                trace_seq_puts(s, "}\n");
        else
                trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN)
                trace_seq_printf(s, " (Overruns: %lu)\n",
                                 trace->overrun);

        print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                        cpu, pid, flags);

        return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        struct trace_array *tr = iter->tr;
        unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        print_graph_prologue(iter, s, 0, 0, flags);

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
                        trace_seq_putc(s, ' ');

        /* The comment */
        trace_seq_puts(s, "/* ");

        switch (iter->ent->type) {
        case TRACE_BPUTS:
                ret = trace_print_bputs_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        if (trace_seq_has_overflowed(s))
                goto out;

        /* Strip ending newline */
        if (s->buffer[s->seq.len - 1] == '\n') {
                s->buffer[s->seq.len - 1] = '\0';
                s->seq.len--;
        }

        trace_seq_puts(s, " */\n");
 out:
        return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the missing entry which would never go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * it can be safely saved at the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and functions as comments */
                return TRACE_TYPE_UNHANDLED;

        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
                           struct trace_event *event)
{
        return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                "	/* 16 spaces */
                "    "					/* 4 spaces */
                "                 ";			/* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
                                        struct seq_file *s, u32 flags)
{
        int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "     TIME       ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "  TASK/PID       ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "  DURATION   ");
        seq_puts(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "      |         ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " |  ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "   |    |        ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "   |   |      ");
        seq_puts(s, "               |   |   |   |\n");
}
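
/*
 * With the default flags this emits a header of roughly the following
 * shape (illustrative; each column appears only when the matching
 * option is enabled):
 *
 *	# CPU  DURATION                  FUNCTION CALLS
 *	# |     |   |                     |   |   |   |
 */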

static void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        struct trace_iterator *iter = s->private;
        struct trace_array *tr = iter->tr;

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
                /* print nothing if the buffers are empty */
                if (trace_empty(iter))
                        return;

                print_trace_header(s, iter);
        }

        __print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        gfp_t gfpflags;
        int cpu;

        iter->private = NULL;

        /* We can be called in atomic context via ftrace_dump() */
        gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

        data = kzalloc(sizeof(*data), gfpflags);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

 out_err_free:
        kfree(data);
 out_err:
        pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_GRAPH_PRINT_IRQS)
                ftrace_graph_skip_irqs = !set;

        if (bit == TRACE_GRAPH_SLEEP_TIME)
                ftrace_graph_sleep_time_control(set);

        if (bit == TRACE_GRAPH_GRAPH_TIME)
                ftrace_graph_graph_time_control(set);

        return 0;
}

static struct trace_event_functions graph_functions = {
        .trace          = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
        .type           = TRACE_GRAPH_ENT,
        .funcs          = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
        .type           = TRACE_GRAPH_RET,
        .funcs          = &graph_functions
};

static struct tracer graph_trace __tracer_data = {
        .name           = "function_graph",
        .update_thresh  = graph_trace_update_thresh,
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
        .set_flag       = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};


static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        fgraph_max_depth = val;

        *ppos += cnt;

        return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
                 loff_t *ppos)
{
        char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
        int n;

        n = sprintf(buf, "%d\n", fgraph_max_depth);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
        .open           = tracing_open_generic,
        .write          = graph_depth_write,
        .read           = graph_depth_read,
        .llseek         = generic_file_llseek,
};

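/*
 * Illustrative (not compiled) use of the max_graph_depth file created
 * below, assuming the usual tracefs mount point:
 *
 *	echo 1 > /sys/kernel/debug/tracing/max_graph_depth
 *
 * limits the trace to the outermost functions entered (e.g. syscall
 * entry points); echo 0 restores unlimited depth.
 */
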
static __init int init_graph_tracefs(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("max_graph_depth", 0644, d_tracer,
                          NULL, &graph_depth_fops);

        return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

        if (!register_trace_event(&graph_trace_entry_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        if (!register_trace_event(&graph_trace_ret_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);