#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <trace/kmemtrace.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_HW_BRANCHES,
	TRACE_KMEM_ALLOC,
	TRACE_KMEM_FREE,
	TRACE_POWER,

	__TRACE_LAST_TYPE,
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		cpu;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			tgid;
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};

/* Function call entry */
struct ftrace_graph_ent_entry {
	struct trace_entry	ent;
	struct ftrace_graph_ent	graph_ent;
};

/* Function return entry */
struct ftrace_graph_ret_entry {
	struct trace_entry	ent;
	struct ftrace_graph_ret	ret;
};
extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

struct userstack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	int			depth;
	char			buf[];
};

#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot_call {
	struct trace_entry	ent;
	struct boot_trace_call	boot_call;
};

struct trace_boot_ret {
	struct trace_entry	ent;
	struct boot_trace_ret	boot_ret;
};

#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
	struct trace_entry	ent;
	unsigned		line;
	char			func[TRACE_FUNC_SIZE+1];
	char			file[TRACE_FILE_SIZE+1];
	char			correct;
};

struct hw_branch_entry {
	struct trace_entry	ent;
	u64			from;
	u64			to;
};

struct trace_power {
	struct trace_entry	ent;
	struct power_trace	state_data;
};

struct kmemtrace_alloc_entry {
	struct trace_entry	ent;
	enum kmemtrace_type_id	type_id;
	unsigned long		call_site;
	const void		*ptr;
	size_t			bytes_req;
	size_t			bytes_alloc;
	gfp_t			gfp_flags;
	int			node;
};

struct kmemtrace_free_entry {
	struct trace_entry	ent;
	enum kmemtrace_type_id	type_id;
	unsigned long		call_site;
	const void		*ptr;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};

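/*
 * Illustrative sketch (not part of the original header): these flag bits
 * live in the trace_entry.flags bitmask, so output code can test them
 * directly. example_entry_in_irq() is a hypothetical helper shown only
 * to demonstrate the usage.
 */
static inline int example_entry_in_irq(struct trace_entry *ent)
{
	return ent->flags & (TRACE_FLAG_HARDIRQ | TRACE_FLAG_SOFTIRQ);
}
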
#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};

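/*
 * Illustrative sketch (not part of the original header): tracers reach
 * their per-CPU state through tr->data[] and typically bump the
 * "disabled" counter around buffer writes to fend off recursion.
 * example_tracing_allowed() is a hypothetical helper.
 */
static inline int example_tracing_allowed(struct trace_array *tr, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	return atomic_read(&data->disabled) == 0;
}
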
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER);	\
		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
			  TRACE_KMEM_ALLOC);				\
		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
			  TRACE_KMEM_FREE);				\
		__ftrace_bad_type();					\
	} while (0)
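
/*
 * Illustrative sketch (not part of the original header): narrowing a
 * generic entry down to its specific type before reading its fields.
 * If @ent is not a TRACE_FN record, the WARN_ON() inside IF_ASSIGN()
 * fires. example_fn_entry_ip() is a hypothetical helper.
 */
static inline unsigned long example_fn_entry_ip(struct trace_entry *ent)
{
	struct ftrace_entry *field;

	trace_assign_type(field, ent);

	return field->ip;
}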

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
};


/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name;	/* Will appear on the trace_options file */
	u32		bit;	/* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

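/*
 * Illustrative sketch (not part of the original header): a tracer that
 * exposes a single "verbose" option, off by default. The EXAMPLE_*
 * macro and the example_opts/example_flags names are hypothetical; a
 * real tracer would point its struct tracer ->flags at such a table.
 */
#define EXAMPLE_OPT_VERBOSE	0x1

static struct tracer_opt example_opts[] = {
	/* TRACER_OPT() expands to the .name and .bit initializers */
	{ TRACER_OPT(verbose, EXAMPLE_OPT_VERBOSE) },
	{ }	/* terminating entry */
};

static struct tracer_flags example_flags = {
	.val	= 0,		/* all options start cleared */
	.opts	= example_opts,
};
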
/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char		*name;
	/* Your tracer should raise a warning if init fails */
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer		*next;
	int			print_max;
	struct tracer_flags	*flags;

	/*
	 * If you change any of the following at tracing runtime, call
	 * init_tracer_stat() again.
	 */

	/* Iteration over statistic entries */
	void			*(*stat_start)(void);
	void			*(*stat_next)(void *prev, int idx);
	/* Compare two entries for sorting (optional) for stats */
	int			(*stat_cmp)(void *p1, void *p2);
	/* Print a stat entry */
	int			(*stat_show)(struct seq_file *s, void *p);
	/* Print the headers of your stat entries */
	int			(*stat_headers)(struct seq_file *s);
};

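/*
 * Illustrative sketch (not part of the original header): about the
 * smallest tracer that can be plugged in - a name plus an init callback.
 * A real tracer would also reset its buffers and hook its callbacks from
 * init, and would be handed to register_tracer(), declared further down.
 * example_tracer and example_trace_init are hypothetical names.
 */
static int example_trace_init(struct trace_array *tr)
{
	/* set up state, reset buffers, enable hooks ... */
	return 0;
}

static struct tracer example_tracer = {
	.name	= "example",
	.init	= example_trace_init,
};
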
struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
	unsigned int		readpos;
};

/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; such routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;

	cpumask_var_t		started;
};

int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

424 | void ftrace(struct trace_array *tr, |
425 | struct trace_array_cpu *data, | |
426 | unsigned long ip, | |
427 | unsigned long parent_ip, | |
38697053 | 428 | unsigned long flags, int pc); |
bc0c38d1 SR |
429 | void tracing_sched_switch_trace(struct trace_array *tr, |
430 | struct trace_array_cpu *data, | |
431 | struct task_struct *prev, | |
432 | struct task_struct *next, | |
38697053 | 433 | unsigned long flags, int pc); |
bc0c38d1 | 434 | void tracing_record_cmdline(struct task_struct *tsk); |
57422797 IM |
435 | |
436 | void tracing_sched_wakeup_trace(struct trace_array *tr, | |
437 | struct trace_array_cpu *data, | |
438 | struct task_struct *wakee, | |
439 | struct task_struct *cur, | |
38697053 | 440 | unsigned long flags, int pc); |
f0a920d5 IM |
441 | void trace_special(struct trace_array *tr, |
442 | struct trace_array_cpu *data, | |
443 | unsigned long arg1, | |
444 | unsigned long arg2, | |
38697053 | 445 | unsigned long arg3, int pc); |
6fb44b71 SR |
446 | void trace_function(struct trace_array *tr, |
447 | struct trace_array_cpu *data, | |
448 | unsigned long ip, | |
449 | unsigned long parent_ip, | |
38697053 | 450 | unsigned long flags, int pc); |
bc0c38d1 | 451 | |
287b6e68 | 452 | void trace_graph_return(struct ftrace_graph_ret *trace); |
e49dc19c | 453 | int trace_graph_entry(struct ftrace_graph_ent *trace); |
a93751ca | 454 | void trace_hw_branch(struct trace_array *tr, u64 from, u64 to); |
1e9b51c2 | 455 | |
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

void init_tracer_stat(struct tracer *trace);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace()		do { } while (0)
# define tracing_stop_function_trace()		do { } while (0)
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

extern char *trace_find_cmdline(int pid);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern long ns2usecs(cycle_t nsec);
extern int
trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count || test_tsk_trace_graph(current))
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_trace_addr(unsigned long addr)
{
	return 1;
}
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

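/*
 * Illustrative sketch (not part of the original header): an entry hook
 * for the graph tracer can consult ftrace_graph_addr() to honour the
 * set_graph_function filter. example_graph_entry() is a hypothetical
 * helper; the real hook is trace_graph_entry(), declared above.
 */
static inline int example_graph_entry(struct ftrace_graph_ent *trace)
{
	if (!ftrace_graph_addr(trace->func))
		return 0;	/* skip this function */

	return 1;		/* record it */
}
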
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct pid *ftrace_pid_trace;

static inline int ftrace_trace_task(struct task_struct *task)
{
	if (!ftrace_pid_trace)
		return 1;

	return test_tsk_trace_trace(task);
}

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
	TRACE_ITER_BRANCH		= 0x1000,
	TRACE_ITER_ANNOTATE		= 0x2000,
	TRACE_ITER_USERSTACKTRACE	= 0x4000,
	TRACE_ITER_SYM_USEROBJ		= 0x8000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x10000
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag; if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable call saved the state of preemption.
 * If resched is set, then we were either inside an atomic section or
 * inside the scheduler (we would have already scheduled otherwise).
 * In this case, we do not want to call normal preempt_enable, but
 * preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

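/*
 * Illustrative sketch (not part of the original header): how a tracing
 * hook is expected to pair the two helpers above, so that its own
 * preempt_enable() cannot trigger a recursive schedule.
 * example_trace_hook() is a hypothetical function.
 */
static inline void example_trace_hook(void)
{
	int resched;

	resched = ftrace_preempt_disable();

	/* record the event here; this may run from within the scheduler */

	ftrace_preempt_enable(resched);
}
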
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

#endif /* _LINUX_KERNEL_TRACE_H */