#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_HWLAT,
	TRACE_RAW_DATA,

	__TRACE_LAST_TYPE,
};

#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print,	\
			    filter)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter) __packed

#include "trace_entries.h"

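/*
 * Worked example (illustrative; shown with the TRACE_FN entry that
 * trace_entries.h declares via these macros): an invocation such as
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		print_fmt, filter)
 *
 * expands, with the definitions above, to
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 *
 * (the print and filter arguments are only consumed by other files
 * that redefine FTRACE_ENTRY before including trace_entries.h).
 */
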
/*
 * Syscalls are special, and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 *  PREEMPT_RESCHED	- preempt_need_resched is set
 *  NMI			- inside an NMI handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	u64			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

	bool			ignore_pid;
#ifdef CONFIG_FUNCTION_TRACER
	bool			ftrace_ignore_pid;
#endif
};

struct tracer;
struct trace_option_dentry;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	u64				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

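/*
 * Note (an inference from the pid accessors declared later in this
 * file, not something this header states): @pids is a bitmap with one
 * bit per pid and @pid_max bits in total; trace_find_filtered_pid()
 * tests a pid's bit in it.
 */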
struct trace_pid_list {
	int				pid_max;
	unsigned long			*pids;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer and
	 * the buffers are reset for the trace_buffer so the tracing can
	 * continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	unsigned long		max_latency;
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr, so
	 * it needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
	/* function tracing enabled */
	int			function_enabled;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)

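/*
 * Illustrative use (a sketch, not code from this header): an output
 * callback converts the generic iterator entry into its typed form
 * before touching type-specific fields:
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "func: %ps\n", (void *)field->ip);
 */
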
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

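/*
 * Example (an illustrative sketch; the "foo" names are hypothetical):
 * a tracer exposing one private option, "foo-verbose", on bit 0x1.
 * The opts array must be terminated by an empty entry, and the
 * initial .val selects which options start out enabled:
 *
 *	static struct tracer_opt foo_opts[] = {
 *		{ TRACER_OPT(foo-verbose, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags foo_flags = {
 *		.val = 0,
 *		.opts = foo_opts,
 *	};
 */
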
struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	int			ref;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};

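/*
 * A minimal tracer sketch (illustrative only; "foo" is hypothetical):
 *
 *	static int foo_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static struct tracer foo_tracer __tracer_data = {
 *		.name	= "foo",
 *		.init	= foo_tracer_init,
 *		.flags	= &foo_flags,
 *	};
 *
 * It would be hooked up at boot via register_tracer(&foo_tracer)
 * (declared below) and selected with "echo foo > current_tracer".
 */
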
/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_BRANCH_BIT,
	/*
	 * Abuse of the trace_recursion.
	 * We need a way to maintain state if we are tracing the function
	 * graph in irq, because we want to trace a particular function
	 * that was called in irq context but have irq tracing off. Since
	 * this can only be modified by current, we can reuse
	 * trace_recursion.
	 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}

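/*
 * Typical usage pattern (a sketch, not code from this header): a
 * function callback reserves the recursion bit for the current
 * context on entry, bails out if it was already set, and releases
 * it again on exit:
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;
 *
 *	do_the_actual_tracing_work();
 *
 *	trace_clear_recursion(bit);
 */
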
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
int tracer_tracing_is_on(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
					struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

/* PID filtering */

extern int pid_max;

bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern u64 ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, then the tracers need to reference __init functions.
 */
#define __tracer_data __refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40
#define TRACE_GRAPH_PRINT_TAIL		0x80
#define TRACE_GRAPH_SLEEP_TIME		0x100
#define TRACE_GRAPH_GRAPH_TIME		0x200
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);
extern void ftrace_graph_graph_time_control(bool enable);

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
extern int ftrace_graph_notrace_count;
extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_notrace_count)
		return 0;

	for (i = 0; i < ftrace_graph_notrace_count; i++) {
		if (addr == ftrace_graph_notrace_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

extern unsigned int fgraph_max_depth;

static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
	/* trace it when it is nested in, or is itself, an enabled function */
	return !(trace->depth || ftrace_graph_addr(trace->func)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
int using_ftrace_ops_list_func(void);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
				  struct dentry *d_tracer);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);

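/*
 * Sketch of the usual write-handler loop (illustrative, not code from
 * this header): pull one space-separated word out of a user buffer:
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		consume_word(parser.buffer);
 *
 *	trace_parser_put(&parser);
 *
 * where consume_word() stands in for whatever the handler does with
 * the parsed, NUL-terminated token.
 */
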
/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that controls the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };

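/*
 * Worked expansion (illustrative): with the two C() definitions above,
 * the first entry of TRACE_FLAGS produces, in order,
 *
 *	TRACE_ITER_PRINT_PARENT_BIT = 0		(in trace_iterator_bits)
 *	TRACE_ITER_PRINT_PARENT = (1 << 0)	(in trace_iterator_flags)
 *
 * so the bit positions, the masks, and the "print-parent" name shown
 * in the trace_options file all come from the one list and cannot
 * drift apart.
 */
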
/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct trace_subsystem_dir {
	struct list_head		list;
	struct event_subsystem		*subsystem;
	struct trace_array		*tr;
	struct dentry			*entry;
	int				ref_count;
	int				nr_events;
};

extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct ring_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

static inline void
__trace_event_discard_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}
	ring_buffer_discard_commit(buffer, event);
}

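/*
 * The reserve/commit pattern these helpers implement (a sketch;
 * "fill in" stands for writing the type-specific fields):
 *
 *	struct ring_buffer_event *event;
 *	struct print_entry *entry;
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	... fill in *entry ...
 *	trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
 */
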
/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry);

	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, entry))) {
		__trace_event_discard_commit(buffer, event);
		return true;
	}

	return false;
}

/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct ring_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}

/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
				 struct ring_buffer *buffer,
				 struct ring_buffer_event *event,
				 void *entry, unsigned long irq_flags, int pc,
				 struct pt_regs *regs)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
						irq_flags, pc, regs);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
	MATCH_GLOB,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int			offset;
	int			not;
	int			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};

static inline bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING;
}

static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct trace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct trace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

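/*
 * Filter strings compare an event's fields against constants; a
 * sketch of the accepted form (the fields below belong to the
 * sched_switch event and are only examples):
 *
 *	prev_pid == 42
 *	prev_comm ~ "bash*" && prev_prio < 100
 *
 * apply_event_filter() parses such a string into a tree of
 * filter_pred entries that filter_match_preds() evaluates against
 * each recorded entry.
 */
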
struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct trace_event_file *find_event_file(struct trace_array *tr,
						const char *system,
						const char *event);

static inline void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;
ac199db0 | 1345 | |
85f2b082 | 1346 | extern const struct file_operations event_trigger_fops; |
7ef224d1 TZ |
1347 | extern const struct file_operations event_hist_fops; |
1348 | ||
1349 | #ifdef CONFIG_HIST_TRIGGERS | |
1350 | extern int register_trigger_hist_cmd(void); | |
d0bad49b | 1351 | extern int register_trigger_hist_enable_disable_cmds(void); |
7ef224d1 TZ |
1352 | #else |
1353 | static inline int register_trigger_hist_cmd(void) { return 0; } | |
d0bad49b | 1354 | static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; } |
7ef224d1 | 1355 | #endif |
85f2b082 TZ |
1356 | |
1357 | extern int register_trigger_cmds(void); | |
1358 | extern void clear_event_triggers(struct trace_array *tr); | |
1359 | ||
1360 | struct event_trigger_data { | |
1361 | unsigned long count; | |
1362 | int ref; | |
1363 | struct event_trigger_ops *ops; | |
1364 | struct event_command *cmd_ops; | |
d8a30f20 | 1365 | struct event_filter __rcu *filter; |
85f2b082 TZ |
1366 | char *filter_str; |
1367 | void *private_data; | |
104f2810 | 1368 | bool paused; |
db1388b4 | 1369 | bool paused_tmp; |
85f2b082 | 1370 | struct list_head list; |
db1388b4 TZ |
1371 | char *name; |
1372 | struct list_head named_list; | |
1373 | struct event_trigger_data *named_data; | |
85f2b082 TZ |
1374 | }; |
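/*
 * Hedged illustration: for a user command such as
 *
 *	echo 'traceoff:5 if prev_prio < 100' > .../trigger
 *
 * @count holds the number of firings left (5 here; -1 means no limit),
 * and @filter/@filter_str hold the parsed and textual forms of the
 * optional 'if' clause. @name and the named_* fields let named
 * triggers (e.g. hist triggers) share one instance across events.
 */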
1375 | ||
d0bad49b TZ |
1376 | /* Avoid typos */ |
1377 | #define ENABLE_EVENT_STR "enable_event" | |
1378 | #define DISABLE_EVENT_STR "disable_event" | |
1379 | #define ENABLE_HIST_STR "enable_hist" | |
1380 | #define DISABLE_HIST_STR "disable_hist" | |
1381 | ||
1382 | struct enable_trigger_data { | |
1383 | struct trace_event_file *file; | |
1384 | bool enable; | |
1385 | bool hist; | |
1386 | }; | |
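/*
 * These strings correspond to the user-facing trigger syntax, e.g.
 *
 *	echo 'enable_event:sched:sched_switch' > .../trigger
 *	echo 'disable_event:sched:sched_switch:5' > .../trigger
 *
 * (see Documentation/trace/events.txt). struct enable_trigger_data
 * records which event file the trigger targets, whether it enables or
 * disables it, and (@hist) whether the enable_hist/disable_hist
 * variants were used.
 */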
1387 | ||
1388 | extern int event_enable_trigger_print(struct seq_file *m, | |
1389 | struct event_trigger_ops *ops, | |
1390 | struct event_trigger_data *data); | |
1391 | extern void event_enable_trigger_free(struct event_trigger_ops *ops, | |
1392 | struct event_trigger_data *data); | |
1393 | extern int event_enable_trigger_func(struct event_command *cmd_ops, | |
1394 | struct trace_event_file *file, | |
1395 | char *glob, char *cmd, char *param); | |
1396 | extern int event_enable_register_trigger(char *glob, | |
1397 | struct event_trigger_ops *ops, | |
1398 | struct event_trigger_data *data, | |
1399 | struct trace_event_file *file); | |
1400 | extern void event_enable_unregister_trigger(char *glob, | |
1401 | struct event_trigger_ops *ops, | |
1402 | struct event_trigger_data *test, | |
1403 | struct trace_event_file *file); | |
ab4bf008 TZ |
1404 | extern void trigger_data_free(struct event_trigger_data *data); |
1405 | extern int event_trigger_init(struct event_trigger_ops *ops, | |
1406 | struct event_trigger_data *data); | |
1407 | extern int trace_event_trigger_enable_disable(struct trace_event_file *file, | |
1408 | int trigger_enable); | |
1409 | extern void update_cond_flag(struct trace_event_file *file); | |
1410 | extern void unregister_trigger(char *glob, struct event_trigger_ops *ops, | |
1411 | struct event_trigger_data *test, | |
1412 | struct trace_event_file *file); | |
1413 | extern int set_trigger_filter(char *filter_str, | |
1414 | struct event_trigger_data *trigger_data, | |
1415 | struct trace_event_file *file); | |
db1388b4 TZ |
1416 | extern struct event_trigger_data *find_named_trigger(const char *name); |
1417 | extern bool is_named_trigger(struct event_trigger_data *test); | |
1418 | extern int save_named_trigger(const char *name, | |
1419 | struct event_trigger_data *data); | |
1420 | extern void del_named_trigger(struct event_trigger_data *data); | |
1421 | extern void pause_named_trigger(struct event_trigger_data *data); | |
1422 | extern void unpause_named_trigger(struct event_trigger_data *data); | |
1423 | extern void set_named_trigger_data(struct event_trigger_data *data, | |
1424 | struct event_trigger_data *named_data); | |
ab4bf008 | 1425 | extern int register_event_command(struct event_command *cmd); |
d0bad49b TZ |
1426 | extern int unregister_event_command(struct event_command *cmd); |
ab4bf008 | 1428 | |
85f2b082 TZ |
1429 | /** |
1430 | * struct event_trigger_ops - callbacks for trace event triggers | |
1431 | * | |
1432 | * The methods in this structure provide per-event trigger hooks for | |
1433 | * various trigger operations. | |
1434 | * | |
1435 | * All the methods below, except for @init() and @free(), must be | |
1436 | * implemented. | |
1437 | * | |
1438 | * @func: The trigger 'probe' function called when the triggering | |
1439 | * event occurs. The data passed into this callback is the data | |
1440 | * that was supplied to the event_command @reg() function that | |
c4a59230 TZ |
1441 | * registered the trigger (see struct event_command) along with |
1442 | * the trace record, rec. | |
85f2b082 TZ |
1443 | * |
1444 | * @init: An optional initialization function called for the trigger | |
1445 | * when the trigger is registered (via the event_command reg() | |
1446 | * function). This can be used to perform per-trigger | |
1447 | * initialization such as incrementing a per-trigger reference | |
1448 | * count, for instance. This is usually implemented by the | |
1449 | * generic utility function @event_trigger_init() (see | |
1450 | * trace_event_triggers.c). | |
1451 | * trace_events_trigger.c). |
1452 | * @free: An optional de-initialization function called for the | |
1453 | * trigger when the trigger is unregistered (via the | |
1454 | * event_command @reg() function). This can be used to perform | |
1455 | * per-trigger de-initialization such as decrementing a | |
1456 | * per-trigger reference count and freeing corresponding trigger | |
1457 | * data, for instance. This is usually implemented by the | |
1458 | * generic utility function @event_trigger_free() (see | |
1459 | * trace_events_trigger.c). |
1460 | * | |
1461 | * @print: The callback function invoked to have the trigger print | |
1462 | * itself. This is usually implemented by a wrapper function | |
1463 | * that calls the generic utility function @event_trigger_print() | |
1464 | * (see trace_events_trigger.c). |
1465 | */ | |
1466 | struct event_trigger_ops { | |
c4a59230 TZ |
1467 | void (*func)(struct event_trigger_data *data, |
1468 | void *rec); | |
85f2b082 TZ |
1469 | int (*init)(struct event_trigger_ops *ops, |
1470 | struct event_trigger_data *data); | |
1471 | void (*free)(struct event_trigger_ops *ops, | |
1472 | struct event_trigger_data *data); | |
1473 | int (*print)(struct seq_file *m, | |
1474 | struct event_trigger_ops *ops, | |
1475 | struct event_trigger_data *data); | |
1476 | }; | |
1477 | ||
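/*
 * Minimal sketch of an event_trigger_ops instance. Illustrative only:
 * the example_* names are assumptions, not kernel symbols; real
 * implementations live in kernel/trace/trace_events_trigger.c.
 */
static void example_trigger(struct event_trigger_data *data, void *rec)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	pr_info("example trigger fired\n");	/* the trigger's action */
}

static int example_trigger_print(struct seq_file *m,
				 struct event_trigger_ops *ops,
				 struct event_trigger_data *data)
{
	seq_puts(m, "example");
	if (data->count != -1)
		seq_printf(m, ":count=%lu", data->count);
	seq_putc(m, '\n');
	return 0;
}

static struct event_trigger_ops example_trigger_ops = {
	.func	= example_trigger,
	.print	= example_trigger_print,
	.init	= event_trigger_init,	/* generic refcount helper, optional */
};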
1478 | /** | |
1479 | * struct event_command - callbacks and data members for event commands | |
1480 | * | |
1481 | * Event commands are invoked by users by writing the command name | |
1482 | * into the 'trigger' file associated with a trace event. The | |
1483 | * parameters associated with a specific invocation of an event | |
1484 | * command are used to create an event trigger instance, which is | |
1485 | * added to the list of trigger instances associated with that trace | |
1486 | * event. When the event is hit, the set of triggers associated with | |
1487 | * that event is invoked. | |
1488 | * | |
1489 | * The data members in this structure provide per-event command data | |
1490 | * for various event commands. | |
1491 | * | |
1492 | * All the data members below, except for @flags, must be set |
1493 | * for each event command. | |
1494 | * | |
1495 | * @name: The unique name that identifies the event command. This is | |
1496 | * the name used when setting triggers via trigger files. | |
1497 | * | |
1498 | * @trigger_type: A unique id that identifies the event command | |
1499 | * 'type'. This value has two purposes: the first is to ensure that |
1500 | * only one trigger of the same type can be set at a given time |
1501 | * for a particular event; e.g. it doesn't make sense to have both |
1502 | * a traceon and a traceoff trigger attached to a single event at |
1503 | * the same time, so traceon and traceoff have the same type |
1504 | * though they have different names. The @trigger_type value is |
1505 | * also used as a bit value for deferring the actual trigger | |
1506 | * action until after the current event is finished. Some | |
1507 | * commands need to do this if they themselves log to the trace | |
1508 | * buffer (see the EVENT_CMD_FL_POST_TRIGGER flag below). @trigger_type |
1509 | * values are defined by adding new values to the trigger_type | |
af658dca | 1510 | * enum in include/linux/trace_events.h. |
85f2b082 | 1511 | * |
353206f5 | 1512 | * @flags: See the enum event_command_flags below. |
a5863dae | 1513 | * |
a88e1cfb TZ |
1514 | * All the methods below, except for @set_filter() and @unreg_all(), |
1515 | * must be implemented. | |
85f2b082 TZ |
1516 | * |
1517 | * @func: The callback function responsible for parsing and | |
1518 | * registering the trigger written to the 'trigger' file by the | |
1519 | * user. It allocates the trigger instance and registers it with | |
1520 | * the appropriate trace event. It makes use of the other | |
1521 | * event_command callback functions to orchestrate this, and is | |
1522 | * usually implemented by the generic utility function | |
1523 | * @event_trigger_callback() (see trace_events_trigger.c). |
1524 | * | |
1525 | * @reg: Adds the trigger to the list of triggers associated with the | |
1526 | * event, and enables the event trigger itself, after | |
1527 | * initializing it (via the event_trigger_ops @init() function). | |
1528 | * This is also where commands can use the @trigger_type value to | |
1529 | * make the decision as to whether or not multiple instances of | |
1530 | * the trigger should be allowed. This is usually implemented by | |
1531 | * the generic utility function @register_trigger() (see | |
1532 | * trace_events_trigger.c). |
1533 | * | |
1534 | * @unreg: Removes the trigger from the list of triggers associated | |
1535 | * with the event, and disables the event trigger itself, after | |
1536 | * initializing it (via the event_trigger_ops @free() function). | |
1537 | * This is usually implemented by the generic utility function | |
1538 | * @unregister_trigger() (see trace_events_trigger.c). |
1539 | * | |
a88e1cfb TZ |
1540 | * @unreg_all: An optional function called to remove all the triggers |
1541 | * from the list of triggers associated with the event. Called | |
1542 | * when a trigger file is opened in truncate mode. | |
1543 | * | |
85f2b082 TZ |
1544 | * @set_filter: An optional function called to parse and set a filter |
1545 | * for the trigger. If no @set_filter() method is set for the | |
1546 | * event command, filters set by the user for the command will be | |
1547 | * ignored. This is usually implemented by the generic utility | |
1548 | * function @set_trigger_filter() (see trace_events_trigger.c). |
1549 | * | |
1550 | * @get_trigger_ops: The callback function invoked to retrieve the | |
1551 | * event_trigger_ops implementation associated with the command. | |
1552 | */ | |
1553 | struct event_command { | |
1554 | struct list_head list; | |
1555 | char *name; | |
1556 | enum event_trigger_type trigger_type; | |
353206f5 | 1557 | int flags; |
85f2b082 | 1558 | int (*func)(struct event_command *cmd_ops, |
7f1d2f82 | 1559 | struct trace_event_file *file, |
85f2b082 TZ |
1560 | char *glob, char *cmd, char *params); |
1561 | int (*reg)(char *glob, | |
1562 | struct event_trigger_ops *ops, | |
1563 | struct event_trigger_data *data, | |
7f1d2f82 | 1564 | struct trace_event_file *file); |
85f2b082 TZ |
1565 | void (*unreg)(char *glob, |
1566 | struct event_trigger_ops *ops, | |
1567 | struct event_trigger_data *data, | |
7f1d2f82 | 1568 | struct trace_event_file *file); |
a88e1cfb | 1569 | void (*unreg_all)(struct trace_event_file *file); |
85f2b082 TZ |
1570 | int (*set_filter)(char *filter_str, |
1571 | struct event_trigger_data *data, | |
7f1d2f82 | 1572 | struct trace_event_file *file); |
85f2b082 TZ |
1573 | struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param); |
1574 | }; | |
1575 | ||
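/*
 * Hedged sketch of wiring an event command to the ops above; the
 * example_* names are illustrative. event_trigger_callback() and
 * register_trigger() are the generic helpers the kernel-doc refers to
 * (defined in kernel/trace/trace_events_trigger.c), and a real command
 * would add its own enum event_trigger_type value rather than reuse
 * ETT_TRACE_ONOFF.
 */
static struct event_trigger_ops *
example_get_trigger_ops(char *cmd, char *param)
{
	return &example_trigger_ops;
}

static struct event_command trigger_example_cmd = {
	.name			= "example",
	.trigger_type		= ETT_TRACE_ONOFF,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.set_filter		= set_trigger_filter,
	.get_trigger_ops	= example_get_trigger_ops,
};

static __init int register_trigger_example_cmd(void)
{
	return register_event_command(&trigger_example_cmd);
}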
353206f5 SRRH |
1576 | /** |
1577 | * enum event_command_flags - flags for struct event_command | |
1578 | * | |
1579 | * @POST_TRIGGER: A flag that says whether or not this command needs | |
1580 | * to have its action delayed until after the current event has | |
1581 | * been closed. Some triggers need to avoid being invoked while | |
1582 | * an event is currently in the process of being logged, since | |
1583 | * the trigger may itself log data into the trace buffer. Thus | |
1584 | * we make sure the current event is committed before invoking | |
1585 | * those triggers. To do that, the trigger invocation is split | |
1586 | * in two - the first part checks the filter using the current | |
1587 | * trace record; if a command has the @POST_TRIGGER flag set, it |
1588 | * sets a bit for itself in the return value, otherwise it | |
1589 | * directly invokes the trigger. Once all commands have been | |
1590 | * either invoked or set their return flag, the current record is | |
1591 | * either committed or discarded. At that point, if any commands | |
1592 | * have deferred their triggers, those commands are finally | |
1593 | * invoked following the close of the current event. In other | |
1594 | * words, if the event_trigger_ops @func() probe implementation | |
1595 | * itself logs to the trace buffer, this flag should be set, | |
1596 | * otherwise it can be left unspecified. | |
1597 | * | |
1598 | * @NEEDS_REC: A flag that says whether or not this command needs | |
1599 | * access to the trace record in order to perform its function, | |
1600 | * regardless of whether or not it has a filter associated with | |
1601 | * it (filters make a trigger require access to the trace record | |
1602 | * but are not always present). | |
1603 | */ | |
1604 | enum event_command_flags { | |
1605 | EVENT_CMD_FL_POST_TRIGGER = 1, | |
1606 | EVENT_CMD_FL_NEEDS_REC = 2, | |
1607 | }; | |
1608 | ||
1609 | static inline bool event_command_post_trigger(struct event_command *cmd_ops) | |
1610 | { | |
1611 | return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER; | |
1612 | } | |
1613 | ||
1614 | static inline bool event_command_needs_rec(struct event_command *cmd_ops) | |
1615 | { | |
1616 | return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC; | |
1617 | } | |
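/*
 * Illustration (hedged): a command whose trigger itself writes to the
 * trace buffer (the stacktrace command, for instance) sets
 *
 *	.flags = EVENT_CMD_FL_POST_TRIGGER,
 *
 * so its action is deferred until the current record is committed,
 * while a command that must see the record even without a filter
 * attached sets EVENT_CMD_FL_NEEDS_REC.
 */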
1618 | ||
7f1d2f82 | 1619 | extern int trace_event_enable_disable(struct trace_event_file *file, |
85f2b082 | 1620 | int enable, int soft_disable); |
93e31ffb | 1621 | extern int tracing_alloc_snapshot(void); |
85f2b082 | 1622 | |
e9fb2b6d SR |
1623 | extern const char *__start___trace_bprintk_fmt[]; |
1624 | extern const char *__stop___trace_bprintk_fmt[]; | |
1625 | ||
102c9323 SRRH |
1626 | extern const char *__start___tracepoint_str[]; |
1627 | extern const char *__stop___tracepoint_str[]; | |
1628 | ||
b9f9108c | 1629 | void trace_printk_control(bool enabled); |
07d777fe | 1630 | void trace_printk_init_buffers(void); |
81698831 | 1631 | void trace_printk_start_comm(void); |
613f04a0 | 1632 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); |
2b6080f2 | 1633 | int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); |
07d777fe | 1634 | |
ca268da6 SRRH |
1635 | /* |
1636 | * Normal trace_printk() and friends allocate special buffers |
1637 | * to do the manipulation, and save the print formats |
1638 | * into sections to display. But the trace infrastructure wants |
1639 | * to use these without the added overhead, at the price of being |
1640 | * a bit slower (it is used mainly for warnings, where we don't |
1641 | * care about performance). internal_trace_puts() exists for such |
1642 | * a purpose. |
1643 | */ | |
1644 | #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str)) | |
1645 | ||
4e5292ea | 1646 | #undef FTRACE_ENTRY |
02aa3162 | 1647 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ |
2425bcb9 | 1648 | extern struct trace_event_call \ |
52f5684c | 1649 | __aligned(4) event_##call; |
4e5292ea | 1650 | #undef FTRACE_ENTRY_DUP |
02aa3162 JO |
1651 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ |
1652 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \ | |
1653 | filter) | |
a4a551b8 NK |
1654 | #undef FTRACE_ENTRY_PACKED |
1655 | #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \ | |
1656 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \ | |
1657 | filter) | |
1658 | ||
4e5292ea | 1659 | #include "trace_entries.h" |
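/*
 * Illustration (hedged): with the redefinitions above, an entry such as
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN, ...)
 *
 * in trace_entries.h now expands to
 *
 *	extern struct trace_event_call __aligned(4) event_function;
 *
 * so the same header that produced struct definitions on its first
 * inclusion produces extern declarations here.
 */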
e1112b4d | 1660 | |
6e48b550 | 1661 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER) |
2425bcb9 | 1662 | int perf_ftrace_event_register(struct trace_event_call *call, |
ced39002 JO |
1663 | enum trace_reg type, void *data); |
1664 | #else | |
1665 | #define perf_ftrace_event_register NULL | |
6e48b550 | 1666 | #endif |
ced39002 | 1667 | |
5f893b26 SRRH |
1668 | #ifdef CONFIG_FTRACE_SYSCALLS |
1669 | void init_ftrace_syscalls(void); | |
dbfeaa7a | 1670 | const char *get_syscall_name(int syscall); |
5f893b26 SRRH |
1671 | #else |
1672 | static inline void init_ftrace_syscalls(void) { } | |
dbfeaa7a TZ |
1673 | static inline const char *get_syscall_name(int syscall) |
1674 | { | |
1675 | return NULL; | |
1676 | } | |
5f893b26 SRRH |
1677 | #endif |
1678 | ||
1679 | #ifdef CONFIG_EVENT_TRACING | |
1680 | void trace_event_init(void); | |
0c564a53 | 1681 | void trace_event_enum_update(struct trace_enum_map **map, int len); |
5f893b26 SRRH |
1682 | #else |
1683 | static inline void __init trace_event_init(void) { } | |
cc9e4bde | 1684 | static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { } |
5f893b26 SRRH |
1685 | #endif |
1686 | ||
0daa2302 | 1687 | extern struct trace_iterator *tracepoint_print_iter; |
5f893b26 | 1688 | |
bc0c38d1 | 1689 | #endif /* _LINUX_KERNEL_TRACE_H */ |