/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs. The following
 * values are used by print_graph_irq and others to fill in space in the
 * DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * curr_ret_stack is an index into the current task's ftrace return
	 * stack. Its value should be in [0, FTRACE_RETFUNC_DEPTH) when the
	 * function graph tracer is used. To support filtering out specific
	 * functions, the index is made negative by subtracting a huge value
	 * (FTRACE_NOTRACE_DEPTH), so that ftrace ignores the record whenever
	 * it sees a negative index. The index is recovered when returning
	 * from the filtered function by adding FTRACE_NOTRACE_DEPTH back,
	 * after which functions are recorded normally again.
	 *
	 * curr_ret_stack is initialized to -1 and gets incremented in this
	 * function. So it can be less than -1 only if it was filtered out
	 * via ftrace_graph_notrace_addr(), which can be set from the
	 * set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	*depth = current->curr_ret_stack;

	return 0;
}
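
/*
 * Illustration of the notrace bookkeeping above (the numbers are only an
 * example): with curr_ret_stack == 2, pushing a function listed in
 * set_graph_notrace stores its state at index 3 but leaves curr_ret_stack
 * at 3 - FTRACE_NOTRACE_DEPTH, a negative value. The entry handlers bail
 * out while the index is negative, and ftrace_return_to_handler() adds
 * FTRACE_NOTRACE_DEPTH back when the filtered function returns, so
 * tracing resumes with the index at 2 again.
 */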

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover the index to get the original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}
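
/*
 * What ftrace_pop_return_trace() hands back: the saved original return
 * address through @ret, and a struct ftrace_graph_ret describing the
 * exiting function (func, calltime, current overrun count and depth).
 * The caller below only has to add rettime before emitting the event.
 */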

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
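
/*
 * ftrace_return_to_handler() is normally reached from the arch's
 * return_to_handler trampoline, which the entry code installed in place
 * of the real return address. The value returned here is what the
 * trampoline finally jumps back to.
 */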

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	if (index < -1)
		index += FTRACE_NOTRACE_DEPTH;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
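
/*
 * Rough sketch of how an unwinder is expected to use the helper above
 * (the names in this sketch are illustrative only):
 *
 *	int graph_idx = 0;
 *
 *	while (unwind_next_frame(&state)) {
 *		addr = unwind_get_return_address(&state);
 *		addr = ftrace_graph_ret_addr(task, &graph_idx, addr,
 *					     state.retp);
 *		...
 *	}
 *
 * so every address the graph tracer rewrote to return_to_handler is
 * mapped back to the real caller before being reported.
 */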

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	/* Trace it when it is nested in, or is, an enabled function. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
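
/*
 * Summary of the entry-side decisions above: return 0 (do not record and
 * do not hook the return) when the task is not being traced, the function
 * is neither in the graph filter nor nested inside one that is, irqs are
 * being skipped, or max_depth is exceeded. Return 1 without writing an
 * event for set_graph_notrace functions (their ret_stack entry is still
 * needed to restore the index on return) and when tracing_thresh is set
 * (only return events are written in that mode).
 */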

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
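
/*
 * In short: a "leaf" is an entry event whose very next event in the
 * buffer is the matching return for the same pid and function. When one
 * is found, the return event is handed back (and the iterator advanced
 * past it) so the two can be collapsed into a single output line.
 */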
701 | ||
9d9add34 | 702 | static void print_graph_abs_time(u64 t, struct trace_seq *s) |
d1f9cbd7 FW |
703 | { |
704 | unsigned long usecs_rem; | |
705 | ||
706 | usecs_rem = do_div(t, NSEC_PER_SEC); | |
707 | usecs_rem /= 1000; | |
708 | ||
9d9add34 SRRH |
709 | trace_seq_printf(s, "%5lu.%06lu | ", |
710 | (unsigned long)t, usecs_rem); | |
d1f9cbd7 FW |
711 | } |
712 | ||
9d9add34 | 713 | static void |
d1f9cbd7 | 714 | print_graph_irq(struct trace_iterator *iter, unsigned long addr, |
d7a8d9e9 | 715 | enum trace_type type, int cpu, pid_t pid, u32 flags) |
f8b755ac | 716 | { |
983f938a | 717 | struct trace_array *tr = iter->tr; |
d1f9cbd7 | 718 | struct trace_seq *s = &iter->seq; |
678f845e | 719 | struct trace_entry *ent = iter->ent; |
f8b755ac FW |
720 | |
721 | if (addr < (unsigned long)__irqentry_text_start || | |
722 | addr >= (unsigned long)__irqentry_text_end) | |
9d9add34 | 723 | return; |
f8b755ac | 724 | |
983f938a | 725 | if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { |
749230b0 | 726 | /* Absolute time */ |
9d9add34 SRRH |
727 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
728 | print_graph_abs_time(iter->ts, s); | |
d1f9cbd7 | 729 | |
749230b0 | 730 | /* Cpu */ |
9d9add34 SRRH |
731 | if (flags & TRACE_GRAPH_PRINT_CPU) |
732 | print_graph_cpu(s, cpu); | |
49ff5903 | 733 | |
749230b0 JO |
734 | /* Proc */ |
735 | if (flags & TRACE_GRAPH_PRINT_PROC) { | |
9d9add34 SRRH |
736 | print_graph_proc(s, pid); |
737 | trace_seq_puts(s, " | "); | |
749230b0 | 738 | } |
678f845e DBO |
739 | |
740 | /* Latency format */ | |
983f938a | 741 | if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) |
9d9add34 | 742 | print_graph_lat_fmt(s, ent); |
9005f3eb | 743 | } |
f8b755ac | 744 | |
9005f3eb | 745 | /* No overhead */ |
983f938a | 746 | print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START); |
f8b755ac | 747 | |
9005f3eb | 748 | if (type == TRACE_GRAPH_ENT) |
9d9add34 | 749 | trace_seq_puts(s, "==========>"); |
9005f3eb | 750 | else |
9d9add34 | 751 | trace_seq_puts(s, "<=========="); |
9005f3eb | 752 | |
983f938a | 753 | print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END); |
9d9add34 | 754 | trace_seq_putc(s, '\n'); |
f8b755ac | 755 | } |

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}
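
/*
 * Worked example for the formatting above: a duration of 12345 ns is
 * split by do_div() into 12 us and a 345 ns remainder, so the column
 * reads "12.345 us" followed by padding out to the fixed row width.
 * Durations with seven or more microsecond digits drop the ".nnn" part.
 */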

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of execution time to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}
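
/*
 * A leaf therefore produces a single line such as (illustrative)
 *
 *	1)   0.140 us    |        kfree();
 *
 * whereas a non-leaf entry opens a "func() {" block that is closed by
 * print_graph_return() once the matching exit event is reached.
 */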

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
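
/*
 * The enter_funcs[] bookkeeping set up above is what lets
 * print_graph_return() notice a closing brace whose entry event was
 * lost (for example dropped from the ring buffer) and annotate it with
 * the function name instead of printing a bare "}".
 */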

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
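
/*
 * The per-cpu depth_irq value used by the two helpers above works as a
 * small state machine: -1 means "not inside irq code"; it is set to the
 * current depth when a function from the irq entry text section is
 * first seen, and reset to -1 when a return at or above that depth goes
 * by. While it is >= 0 (and funcgraph-irqs is off), both entry and
 * return events are suppressed from the output.
 */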

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}
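
/*
 * With funcgraph-tail enabled (or whenever the entry event was lost),
 * the closing brace printed above is annotated with the function name
 * via the "%ps" format, which makes deeply nested traces easier to
 * follow.
 */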

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};


static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

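/*
 * Typical usage of the max_graph_depth file created below (illustrative;
 * tracefs is usually mounted at /sys/kernel/tracing or under
 * /sys/kernel/debug/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/max_graph_depth
 *
 * limits tracing to the outermost functions, and writing 0 removes the
 * depth limit again.
 */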
static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);