#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <linux/kmemtrace.h>
#include <linux/hw_breakpoint.h>

#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_KMEM_ALLOC,
	TRACE_KMEM_FREE,
	TRACE_BLK,
	TRACE_KSYM,

	__TRACE_LAST_TYPE,
};

enum kmemtrace_type_id {
	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
};

extern struct tracer boot_tracer;

#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)	\
	struct struct_name {					\
		struct trace_entry	ent;			\
		tstruct						\
	}

#undef TP_ARGS
#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)

#include "trace_entries.h"

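/*
 * Illustration only, not part of the original header: assuming
 * trace_entries.h describes the function entry roughly like
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		F_printk(" %lx <-- %lx", __entry->ip, __entry->parent_ip)
 *	);
 *
 * then the FTRACE_ENTRY()/__field() definitions above (which only use
 * the tstruct part and ignore the print argument) expand it into:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 *
 * Other includers of trace_entries.h redefine the same macros to
 * generate different code from the same entry descriptions.
 */
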
/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};

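/*
 * Illustration only, not part of the original header: these bits are
 * stored in the "flags" field of struct trace_entry, so an output
 * routine can test them directly, e.g.:
 *
 *	int hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
 *	int softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
 *	char irqs_off = (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
 *			(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.';
 *
 * which is roughly how the latency-format output annotates each line.
 */
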
#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace, etc.).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * trace_assign_type() is a verifier that checks that the entry type
 * matches the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item, and "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
			  TRACE_KMEM_ALLOC);				\
		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
			  TRACE_KMEM_FREE);				\
		IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\
		__ftrace_bad_type();					\
	} while (0)

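/*
 * Illustration only, not part of the original header: an output handler
 * typically uses trace_assign_type() like this (the handler name is made
 * up; the ip/parent_ip fields come from the ftrace_entry layout generated
 * from trace_entries.h):
 *
 *	static enum print_line_t example_fn_output(struct trace_iterator *iter)
 *	{
 *		struct ftrace_entry *field;
 *
 *		trace_assign_type(field, iter->ent);
 *		trace_seq_printf(&iter->seq, "%lx <-- %lx\n",
 *				 field->ip, field->parent_ip);
 *		return TRACE_TYPE_HANDLED;
 *	}
 *
 * If "field" points to a type that is not listed in the macro, the
 * reference to __ftrace_bad_type() fails at link time.
 */
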
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

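/*
 * Illustration only, not part of the original header: a tracer that
 * wants a private "verbose" entry in the trace_options file could
 * define (all names below are made up):
 *
 *	#define TRACE_EXAMPLE_OPT_VERBOSE	0x1
 *
 *	static struct tracer_opt example_opts[] = {
 *		{ TRACER_OPT(example-verbose, TRACE_EXAMPLE_OPT_VERBOSE) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags example_flags = {
 *		.val  = 0,
 *		.opts = example_opts,
 *	};
 *
 * The empty entry terminates the array, .val = 0 starts with every
 * option off, and the tracer points its struct tracer ->flags at
 * &example_flags (see below).
 */
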

/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer		*next;
	int			print_max;
	struct tracer_flags	*flags;
};

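/*
 * Illustration only, not part of the original header: a minimal tracer
 * only needs a name and an init callback; callbacks left NULL are
 * skipped by the core (names below are made up):
 *
 *	static int example_trace_init(struct trace_array *tr)
 *	{
 *		tracing_reset_online_cpus(tr);
 *		return 0;
 *	}
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_trace_init,
 *	};
 *
 * Registering it with register_tracer(&example_tracer) (declared below)
 * makes it appear in available_tracers and selectable via current_tracer.
 */
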

#define TRACE_PIPE_ALL_CPU	-1

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
void tracing_reset_current(int cpu);
void tracing_reset_current_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
				 mode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
int is_tracing_stopped(void);
enum trace_file_type {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
};

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

extern int ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_ksym(struct tracer *trace,
				       struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern unsigned long trace_flags;

extern int trace_clock_id;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);


#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_filter_enabled;
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_filter_enabled)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
#endif

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos);

611 | /* |
612 | * trace_iterator_flags is an enumeration that defines bit | |
613 | * positions into trace_flags that controls the output. | |
614 | * | |
615 | * NOTE: These bits must match the trace_options array in | |
616 | * trace.c. | |
617 | */ | |
4e655519 IM |
618 | enum trace_iterator_flags { |
619 | TRACE_ITER_PRINT_PARENT = 0x01, | |
620 | TRACE_ITER_SYM_OFFSET = 0x02, | |
621 | TRACE_ITER_SYM_ADDR = 0x04, | |
622 | TRACE_ITER_VERBOSE = 0x08, | |
623 | TRACE_ITER_RAW = 0x10, | |
624 | TRACE_ITER_HEX = 0x20, | |
625 | TRACE_ITER_BIN = 0x40, | |
626 | TRACE_ITER_BLOCK = 0x80, | |
627 | TRACE_ITER_STACKTRACE = 0x100, | |
2cbafd68 LZ |
628 | TRACE_ITER_PRINTK = 0x200, |
629 | TRACE_ITER_PREEMPTONLY = 0x400, | |
630 | TRACE_ITER_BRANCH = 0x800, | |
631 | TRACE_ITER_ANNOTATE = 0x1000, | |
632 | TRACE_ITER_USERSTACKTRACE = 0x2000, | |
633 | TRACE_ITER_SYM_USEROBJ = 0x4000, | |
634 | TRACE_ITER_PRINTK_MSGONLY = 0x8000, | |
635 | TRACE_ITER_CONTEXT_INFO = 0x10000, /* Print pid/cpu/time */ | |
636 | TRACE_ITER_LATENCY_FMT = 0x20000, | |
637 | TRACE_ITER_SLEEP_TIME = 0x40000, | |
638 | TRACE_ITER_GRAPH_TIME = 0x80000, | |
4e655519 IM |
639 | }; |
640 | ||
15e6cb36 FW |
641 | /* |
642 | * TRACE_ITER_SYM_MASK masks the options in trace_flags that | |
643 | * control the output of kernel symbols. | |
644 | */ | |
645 | #define TRACE_ITER_SYM_MASK \ | |
646 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) | |
647 | ||
43a15386 FW |
648 | extern struct tracer nop_trace; |
649 | ||
/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag; if it is set, we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable saved the state of preemption.
 * If resched is set, then we are either inside an atomic section or
 * inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

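/*
 * Illustration only, not part of the original header: the pair above is
 * used to bracket tracing code that may run inside the scheduler, e.g.:
 *
 *	static void example_trace_hook(unsigned long ip, unsigned long parent_ip)
 *	{
 *		int resched;
 *
 *		resched = ftrace_preempt_disable();
 *		example_record_event(ip, parent_ip);
 *		ftrace_preempt_enable(resched);
 *	}
 *
 * example_trace_hook() and example_record_event() are made-up names;
 * the real callers are the function tracer callbacks.
 */
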
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done */
int tracing_update_buffers(void);

/* trace event type bit fields, not numeric */
enum {
	TRACE_EVENT_TYPE_PRINTF		= 1,
	TRACE_EVENT_TYPE_RAW		= 2,
};

struct ftrace_event_field {
	struct list_head	link;
	char			*name;
	char			*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;
	struct filter_pred	**preds;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct dentry		*entry;
	struct event_filter	*filter;
	int			nr_events;
};

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event,
				 int val1, int val2);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	struct regex		regex;
	char			*field_name;
	int			offset;
	int			not;
	int			op;
	int			pop_n;
};

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_call *call,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct event_subsystem *system,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);

struct list_head *
trace_get_fields(struct ftrace_event_call *event_call);

static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
		     struct ring_buffer *buffer,
		     struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)		\
	extern struct ftrace_event_call					\
	__attribute__((__aligned__(4))) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
#include "trace_entries.h"

#endif /* _LINUX_KERNEL_TRACE_H */