// SPDX-License-Identifier: GPL-2.0

#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/trace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_HWLAT,
	TRACE_RAW_DATA,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type item;

#undef __field_fn
#define __field_fn(type, item)		type item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)		\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed

#include "trace_entries.h"

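/*
 * Illustrative expansion of FTRACE_ENTRY() (a sketch; "example" and
 * TRACE_EXAMPLE are hypothetical names, the real invocations live in
 * trace_entries.h):
 *
 *	FTRACE_ENTRY(example, example_entry, TRACE_EXAMPLE,
 *		F_STRUCT(
 *			__field(unsigned long,	ip)
 *		),
 *		print);
 *
 * becomes, with the definitions above:
 *
 *	struct example_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *	};
 */
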
/* Use this for memory failure errors */
#define MEM_FAIL(condition, fmt, ...) ({			\
	static bool __section(.data.once) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("ERROR: " fmt, ##__VA_ARGS__);		\
	}							\
	unlikely(__ret_warn_once);				\
})

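/*
 * Illustrative use of MEM_FAIL() (a sketch, not a call site in this file):
 * it reports the error once and still evaluates to the condition, so it can
 * sit directly in an error check.
 *
 *	if (MEM_FAIL(!ptr, "failed to allocate trace buffer\n"))
 *		return -ENOMEM;
 */
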
/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 *  PREEMPT_RESCHED	- a preemption-based reschedule is requested
 *  NMI			- inside an NMI handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	u64			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

#ifdef CONFIG_FUNCTION_TRACER
	int			ftrace_ignore_pid;
#endif
	bool			ignore_pid;
};

struct tracer;
struct trace_option_dentry;

struct array_buffer {
	struct trace_array		*tr;
	struct trace_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	u64				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list {
	int				pid_max;
	unsigned long			*pids;
};

enum {
	TRACE_PIDS		= BIT(0),
	TRACE_NO_PIDS		= BIT(1),
};

static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
				    struct trace_pid_list *no_pid_list)
{
	/* Return true if the pid list in type has pids */
	return ((type & TRACE_PIDS) && pid_list) ||
		((type & TRACE_NO_PIDS) && no_pid_list);
}

static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
					 struct trace_pid_list *no_pid_list)
{
	/*
	 * When turning off what is in @type, return true if the
	 * "other" pid list still has pids in it.
	 */
	return (!(type & TRACE_PIDS) && pid_list) ||
		(!(type & TRACE_NO_PIDS) && no_pid_list);
}

typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);

/**
 * struct cond_snapshot - conditional snapshot data and callback
 *
 * The cond_snapshot structure encapsulates a callback function and
 * data associated with the snapshot for a given tracing instance.
 *
 * When a snapshot is taken conditionally, by invoking
 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
 * passed in turn to the cond_snapshot.update() function.  That data
 * can be compared by the update() implementation with the cond_data
 * contained within the struct cond_snapshot instance associated with
 * the trace_array.  Because the tr->max_lock is held throughout the
 * update() call, the update() function can directly retrieve the
 * cond_snapshot and cond_data associated with the per-instance
 * snapshot associated with the trace_array.
 *
 * The cond_snapshot.update() implementation can save data to be
 * associated with the snapshot if it decides to, and returns 'true'
 * in that case, or it returns 'false' if the conditional snapshot
 * shouldn't be taken.
 *
 * The cond_snapshot instance is created and associated with the
 * user-defined cond_data by tracing_cond_snapshot_enable().
 * Likewise, the cond_snapshot instance is destroyed and is no longer
 * associated with the trace instance by
 * tracing_cond_snapshot_disable().
 *
 * The method below is required.
 *
 * @update: When a conditional snapshot is invoked, the update()
 *	callback function is invoked with the tr->max_lock held.  The
 *	update() implementation signals whether or not to actually
 *	take the snapshot, by returning 'true' if so, 'false' if no
 *	snapshot should be taken.  Because the max_lock is held for
 *	the duration of update(), the implementation is safe to
 *	directly retrieve and save any implementation data it needs
 *	to in association with the snapshot.
 */
struct cond_snapshot {
	void				*cond_data;
	cond_update_fn_t		update;
};

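/*
 * Illustrative update() callback (a sketch; the names are hypothetical):
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_cond *cond = cond_data;
 *
 *		// Only take the snapshot once the value crosses the threshold.
 *		return cond->value > cond->threshold;
 *	}
 *
 * It is associated with the cond_data by tracing_cond_snapshot_enable()
 * and receives that cond_data back on each tracing_snapshot_cond(tr,
 * cond_data) call, as described above.
 */
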
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct array_buffer	array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * array_buffer.  When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the array_buffer
	 * and the buffers are reset for the array_buffer so the
	 * tracing can continue.
	 */
	struct array_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	unsigned long		max_latency;
#ifdef CONFIG_FSNOTIFY
	struct dentry		*d_max_latency;
	struct work_struct	fsnotify_work;
	struct irq_work		fsnotify_irqwork;
#endif
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	struct trace_pid_list	__rcu *filtered_no_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr(),
	 * so it needs to be defined outside of the
	 * CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	bool			clear_trace;
	int			buffer_percent;
	unsigned int		n_err_log_entries;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct list_head	err_log;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	struct trace_event_file *trace_marker_file;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
	struct trace_pid_list	__rcu *function_no_pids;
#ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head	func_probes;
	struct list_head	mod_trace;
	struct list_head	mod_notrace;
#endif
	/* function tracing enabled */
	int			function_enabled;
#endif
	int			time_stamp_abs_ref;
	struct list_head	hist_vars;
#ifdef CONFIG_TRACER_SNAPSHOT
	struct cond_snapshot	*cond_snapshot;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern int tracing_check_open_get_tr(struct trace_array *tr);
extern struct trace_array *trace_array_find(const char *instance);
extern struct trace_array *trace_array_find_get(const char *instance);

extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);

extern bool trace_clock_in_ns(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)			\
	if (FTRACE_CMP_TYPE(var, etype)) {			\
		var = (typeof(var))(entry);			\
		WARN_ON(id != 0 && (entry)->type != id);	\
		break;						\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)

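/*
 * Illustrative use of trace_assign_type() (a sketch; "field" is a
 * hypothetical local variable in an output callback):
 *
 *	struct trace_entry *ent = iter->ent;
 *	struct print_entry *field;
 *
 *	trace_assign_type(field, ent);	// WARNs if ent->type != TRACE_PRINT
 */
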
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b


struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

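/*
 * Illustrative tracer option definition (a sketch; the names and the bit
 * value are hypothetical):
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(verbose-output, 0x1) },
 *		{ } // empty entry terminates the list
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,	// initial value of the flags
 *		.opts = my_opts,
 *	};
 *
 * The tracer points its ->flags at my_flags and handles bit changes in
 * its set_flag() callback.
 */
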
/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	int			ref;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
	/* True if tracer cannot be enabled in kernel param */
	bool			noboot;
};


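/*
 * Illustrative minimal tracer (a sketch; all names are hypothetical):
 *
 *	static int my_tracer_init(struct trace_array *tr)   { return 0; }
 *	static void my_tracer_reset(struct trace_array *tr) { }
 *
 *	static struct tracer my_tracer __tracer_data = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 * Registered at boot with register_tracer(&my_tracer), after which it
 * appears in available_tracers and can be selected via current_tracer.
 */
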
/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	/* Function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_BRANCH_BIT,
/*
 * Abuse of the trace_recursion.
 * As we need a way to maintain state if we are tracing the function
 * graph in irq because we want to trace a particular function that
 * was called in irq context but we have irq tracing off. Since this
 * can only be modified by current, we can reuse trace_recursion.
 */
	TRACE_IRQ_BIT,

	/* Set if the function is in the set_graph_function file */
	TRACE_GRAPH_BIT,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at a start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace. In an even more unlikely case, depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting a start of graph
	 * tracing in the softirq, and depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context!!!! Luckily, it can't be
	 * greater than 3, so the next two bits are a mask
	 * of what the depth is when we set TRACE_GRAPH_BIT.
	 */

	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define trace_recursion_depth() \
	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
#define trace_recursion_set_depth(depth) \
	do {								\
		current->trace_recursion &=				\
			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
		current->trace_recursion |=				\
			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
	} while (0)

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;

		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}

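/*
 * Illustrative use of the recursion protection above (a sketch; the
 * callback name is hypothetical):
 *
 *	static void my_ftrace_callback(unsigned long ip,
 *				       unsigned long parent_ip,
 *				       struct ftrace_ops *op,
 *				       struct pt_regs *regs)
 *	{
 *		int bit;
 *
 *		bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
 *						   TRACE_FTRACE_MAX);
 *		if (bit < 0)
 *			return;	// already tracing in this context
 *
 *		// ... do the tracing work ...
 *
 *		trace_clear_recursion(bit);
 *	}
 */
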
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct array_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
void tracer_tracing_off(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
					struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
unsigned long trace_total_entries(struct trace_array *tr);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_start_tgid_record(void);
void tracing_stop_tgid_record(void);

int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

/* PID filtering */

extern int pid_max;

bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct trace_pid_list *filtered_no_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
		   void *cond_data);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
	defined(CONFIG_FSNOTIFY)

void latency_fsnotify(struct trace_array *tr);

#else

static inline void latency_fsnotify(struct trace_array *tr) { }

#endif

#ifdef CONFIG_STACKTRACE
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern u64 ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern int trace_find_tgid(int pid);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
extern unsigned long ftrace_number_of_pages;
extern unsigned long ftrace_number_of_groups;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, then the tracers need to reference __init functions.
 */
#define __tracer_data __refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk_buf(struct trace_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

struct ftrace_hash;

struct ftrace_mod_load {
	struct list_head	list;
	char			*func;
	char			*module;
	int			enable;
};

enum {
	FTRACE_HASH_FL_MOD	= (1 << 0),
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	unsigned long		flags;
	struct rcu_head		rcu;
};

struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);

static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
}

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_REL_TIME	0x40
#define TRACE_GRAPH_PRINT_IRQS		0x80
#define TRACE_GRAPH_PRINT_TAIL		0x100
#define TRACE_GRAPH_SLEEP_TIME		0x200
#define TRACE_GRAPH_GRAPH_TIME		0x400
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);

#ifdef CONFIG_FUNCTION_PROFILER
extern void ftrace_graph_graph_time_control(bool enable);
#else
static inline void ftrace_graph_graph_time_control(bool enable) { }
#endif

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);

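/*
 * Illustrative flag combination for the graph output helpers above (a
 * sketch): a tracer's print_line callback could pass
 *
 *	u32 flags = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_DURATION;
 *
 *	return print_graph_function_flags(iter, flags);
 *
 * to get a per-CPU column plus the duration column in its output.
 */
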
#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash __rcu *ftrace_graph_hash;
extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;

static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	unsigned long addr = trace->func;
	int ret = 0;
	struct ftrace_hash *hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());

	if (ftrace_hash_empty(hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(hash, addr)) {

		/*
		 * This needs to be cleared on the return functions
		 * when the depth is zero.
		 */
		trace_recursion_set(TRACE_GRAPH_BIT);
		trace_recursion_set_depth(trace->depth);

		/*
		 * If no irqs are to be traced, but a set_graph_function
		 * is set, and called by an interrupt handler, we still
		 * want to trace it.
		 */
		if (in_irq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

out:
	preempt_enable_notrace();
	return ret;
}

static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{
	if (trace_recursion_test(TRACE_GRAPH_BIT) &&
	    trace->depth == trace_recursion_depth())
		trace_recursion_clear(TRACE_GRAPH_BIT);
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int ret = 0;
	struct ftrace_hash *notrace_hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
						 !preemptible());

	if (ftrace_lookup_ip(notrace_hash, addr))
		ret = 1;

	preempt_enable_notrace();
	return ret;
}
#else
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{ }
#endif /* CONFIG_DYNAMIC_FTRACE */

extern unsigned int fgraph_max_depth;

static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
	/* trace it when it is nested in an enabled function, or is one itself */
	return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
		 ftrace_graph_addr(trace)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct trace_array *tr,
					struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return !this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
				  struct dentry *d_tracer);
void ftrace_clear_pids(struct trace_array *tr);
int init_function_trace(void);
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					struct trace_array *tr,
					struct ftrace_probe_ops *ops,
					void *data);
	int			(*init)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *init_data,
					void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

struct ftrace_func_mapper;
typedef int (*ftrace_mapper_func)(void *data);

struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
				  unsigned long ip);
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
			      unsigned long ip, void *data);
void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
				   unsigned long ip);
void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
			     ftrace_mapper_func free_func);

extern int
register_ftrace_function_probe(char *glob, struct trace_array *tr,
			       struct ftrace_probe_ops *ops, void *data);
extern int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
				      struct ftrace_probe_ops *ops);
extern void clear_ftrace_function_probes(struct trace_array *tr);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);

extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
			     int len, int reset);
extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			      int len, int reset);
#else
struct ftrace_func_command;

static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}

/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);

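/*
 * Illustrative write-handler loop using trace_parser (a sketch; error
 * handling is trimmed and do_something() is hypothetical):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser))
 *		do_something(parser.buffer);	// one space-separated token
 *
 *	trace_parser_put(&parser);
 */
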
/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),	\
		C(FUNC_FORK,		"function-fork"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
# define TRACE_ITER_FUNC_FORK		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
	 C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(RECORD_TGID,		"record-tgid"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		C(PAUSE_ON_TRACE,	"pause-on-trace"),	\
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };

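/*
 * Illustrative expansion of the C() trick above (a sketch): with the first
 * definition of C(), an entry such as C(PRINT_PARENT, "print-parent")
 * becomes TRACE_ITER_PRINT_PARENT_BIT inside enum trace_iterator_bits;
 * after C() is redefined, the same entry becomes
 * TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT) inside
 * enum trace_iterator_flags.  The NOTE above refers to trace.c building
 * its trace_options name array from the string half of each entry in the
 * same way.
 */
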
/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct prog_entry;

struct event_filter {
	struct prog_entry	__rcu	*prog;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct trace_subsystem_dir {
	struct list_head		list;
	struct event_subsystem		*subsystem;
	struct trace_array		*tr;
	struct dentry			*entry;
	int				ref_count;
	int				nr_events;
};

extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct trace_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

static inline void
__trace_event_discard_commit(struct trace_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}
	ring_buffer_discard_commit(buffer, event);
}

dad56ee7
SRRH
1464/*
1465 * Helper function for event_trigger_unlock_commit{_regs}().
1466 * If there are event triggers attached to this event that requires
1467 * filtering against its fields, then they wil be called as the
1468 * entry already holds the field information of the current event.
1469 *
1470 * It also checks if the event should be discarded or not.
1471 * It is to be discarded if the event is soft disabled and the
1472 * event was only recorded to process triggers, or if the event
1473 * filter is active and this event did not match the filters.
1474 *
1475 * Returns true if the event is discarded, false otherwise.
1476 */
1477static inline bool
1478__event_trigger_test_discard(struct trace_event_file *file,
13292494 1479 struct trace_buffer *buffer,
dad56ee7
SRRH
1480 struct ring_buffer_event *event,
1481 void *entry,
1482 enum event_trigger_type *tt)
1483{
1484 unsigned long eflags = file->flags;
1485
1486 if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1ac4f51c 1487 *tt = event_triggers_call(file, entry, event);
dad56ee7 1488
9cbb1506
SRRH
1489 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
1490 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
1491 !filter_match_preds(file->filter, entry))) {
0fc1b09f 1492 __trace_event_discard_commit(buffer, event);
9cbb1506
SRRH
1493 return true;
1494 }
dad56ee7 1495
9cbb1506 1496 return false;
dad56ee7
SRRH
1497}
1498
1499/**
1500 * event_trigger_unlock_commit - handle triggers and finish event commit
1501 * @file: The file pointer assoctiated to the event
1502 * @buffer: The ring buffer that the event is being written to
1503 * @event: The event meta data in the ring buffer
1504 * @entry: The event itself
1505 * @irq_flags: The state of the interrupts at the start of the event
1506 * @pc: The state of the preempt count at the start of the event.
1507 *
1508 * This is a helper function to handle triggers that require data
1509 * from the event itself. It also tests the event against filters and
1510 * if the event is soft disabled and should be discarded.
1511 */
1512static inline void
1513event_trigger_unlock_commit(struct trace_event_file *file,
13292494 1514 struct trace_buffer *buffer,
dad56ee7
SRRH
1515 struct ring_buffer_event *event,
1516 void *entry, unsigned long irq_flags, int pc)
1517{
1518 enum event_trigger_type tt = ETT_NONE;
1519
1520 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1521 trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
1522
1523 if (tt)
c94e45bc 1524 event_triggers_post_call(file, tt);
dad56ee7
SRRH
1525}
1526
1527/**
1528 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
1529 * @file: The file pointer assoctiated to the event
1530 * @buffer: The ring buffer that the event is being written to
1531 * @event: The event meta data in the ring buffer
1532 * @entry: The event itself
1533 * @irq_flags: The state of the interrupts at the start of the event
1534 * @pc: The state of the preempt count at the start of the event.
1535 *
1536 * This is a helper function to handle triggers that require data
1537 * from the event itself. It also tests the event against filters and
1538 * checks whether the event is soft disabled and should be discarded.
1539 *
1540 * Same as event_trigger_unlock_commit() but calls
1541 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
1542 */
1543static inline void
1544event_trigger_unlock_commit_regs(struct trace_event_file *file,
13292494 1545 struct trace_buffer *buffer,
dad56ee7
SRRH
1546 struct ring_buffer_event *event,
1547 void *entry, unsigned long irq_flags, int pc,
1548 struct pt_regs *regs)
1549{
1550 enum event_trigger_type tt = ETT_NONE;
1551
1552 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1553 trace_buffer_unlock_commit_regs(file->tr, buffer, event,
1554 irq_flags, pc, regs);
1555
1556 if (tt)
c94e45bc 1557 event_triggers_post_call(file, tt);
dad56ee7
SRRH
1558}
1559
61e9dea2
SR
1560#define FILTER_PRED_INVALID ((unsigned short)-1)
1561#define FILTER_PRED_IS_RIGHT (1 << 15)
43cd4145 1562#define FILTER_PRED_FOLD (1 << 15)
61e9dea2 1563
bf93f9ed
SR
1564/*
1565 * The maximum number of preds is limited by the size of an unsigned
1566 * short, with two flags at the MSBs. One bit is shared by the IS_RIGHT
1567 * and FOLD flags. The other is reserved.
1568 *
1569 * 2^14 preds is way more than enough.
1570 */
1571#define MAX_FILTER_PRED 16384
4a3d27e9 1572
7ce7e424 1573struct filter_pred;
1889d209 1574struct regex;
7ce7e424 1575
58d9a597 1576typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
7ce7e424 1577
1889d209
FW
1578typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1579
3f6fe06d 1580enum regex_type {
b0f1a59a 1581 MATCH_FULL = 0,
3f6fe06d
FW
1582 MATCH_FRONT_ONLY,
1583 MATCH_MIDDLE_ONLY,
1584 MATCH_END_ONLY,
60f1d5e3 1585 MATCH_GLOB,
f79b3f33 1586 MATCH_INDEX,
3f6fe06d
FW
1587};
1588
1889d209
FW
1589struct regex {
1590 char pattern[MAX_FILTER_STR_VAL];
1591 int len;
1592 int field_len;
1593 regex_match_func match;
1594};
1595
7ce7e424 1596struct filter_pred {
1889d209
FW
1597 filter_pred_fn_t fn;
1598 u64 val;
1599 struct regex regex;
61aaef55 1600 unsigned short *ops;
1d0e78e3 1601 struct ftrace_event_field *field;
1889d209 1602 int offset;
80765597 1603 int not;
1889d209 1604 int op;
7ce7e424
TZ
1605};
1606
4ef56902
TZ
1607static inline bool is_string_field(struct ftrace_event_field *field)
1608{
1609 return field->filter_type == FILTER_DYN_STRING ||
1610 field->filter_type == FILTER_STATIC_STRING ||
4c738413
SRV
1611 field->filter_type == FILTER_PTR_STRING ||
1612 field->filter_type == FILTER_COMM;
4ef56902
TZ
1613}
1614
1615static inline bool is_function_field(struct ftrace_event_field *field)
1616{
1617 return field->filter_type == FILTER_TRACE_FN;
1618}
1619
3f6fe06d
FW
1620extern enum regex_type
1621filter_parse_regex(char *buff, int len, char **search, int *not);
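/*
 * Illustrative sketch (not part of this header): what filter_parse_regex()
 * is expected to return for a couple of common patterns.  The helper may
 * rewrite the buffer in place (e.g. strip a trailing '*'), so writable
 * copies are used; the exact classification lives in trace_events_filter.c.
 */
static void example_parse_patterns(void)
{
	char middle[] = "*sched*";
	char negated[] = "!sys_*";
	char *search;
	int not;
	enum regex_type type;

	type = filter_parse_regex(middle, strlen(middle), &search, &not);
	/* Expected: type == MATCH_MIDDLE_ONLY, search == "sched", not == 0 */

	type = filter_parse_regex(negated, strlen(negated), &search, &not);
	/* Expected: type == MATCH_FRONT_ONLY, search == "sys_", not == 1 */
}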
7f1d2f82 1622extern void print_event_filter(struct trace_event_file *file,
4bda2d51 1623 struct trace_seq *s);
7f1d2f82 1624extern int apply_event_filter(struct trace_event_file *file,
8b372562 1625 char *filter_string);
7967b3e0 1626extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
8b372562
TZ
1627 char *filter_string);
1628extern void print_subsystem_event_filter(struct event_subsystem *system,
ac1adc55 1629 struct trace_seq *s);
aa38e9fc 1630extern int filter_assign_type(const char *type);
1e144d73
SRV
1631extern int create_event_filter(struct trace_array *tr,
1632 struct trace_event_call *call,
bac5fb97
TZ
1633 char *filter_str, bool set_str,
1634 struct event_filter **filterp);
1635extern void free_event_filter(struct event_filter *filter);
7ce7e424 1636
b3a8c6fd 1637struct ftrace_event_field *
2425bcb9 1638trace_find_event_field(struct trace_event_call *call, char *name);
2e33af02 1639
e870e9a1 1640extern void trace_event_enable_cmd_record(bool enable);
d914ba37
JF
1641extern void trace_event_enable_tgid_record(bool enable);
1642
58b92547 1643extern int event_trace_init(void);
277ba044 1644extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
0c8916c3 1645extern int event_trace_del_tracer(struct trace_array *tr);
e870e9a1 1646
3c96529c
SRV
1647extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1648 const char *system,
1649 const char *event);
7f1d2f82
SRRH
1650extern struct trace_event_file *find_event_file(struct trace_array *tr,
1651 const char *system,
1652 const char *event);
7862ad18 1653
85f2b082
TZ
1654static inline void *event_file_data(struct file *filp)
1655{
6aa7de05 1656 return READ_ONCE(file_inode(filp)->i_private);
85f2b082
TZ
1657}
1658
20c8928a 1659extern struct mutex event_mutex;
a59fd602 1660extern struct list_head ftrace_events;
ac199db0 1661
85f2b082 1662extern const struct file_operations event_trigger_fops;
7ef224d1 1663extern const struct file_operations event_hist_fops;
6c3edaf9 1664extern const struct file_operations event_inject_fops;
7ef224d1
TZ
1665
1666#ifdef CONFIG_HIST_TRIGGERS
1667extern int register_trigger_hist_cmd(void);
d0bad49b 1668extern int register_trigger_hist_enable_disable_cmds(void);
7ef224d1
TZ
1669#else
1670static inline int register_trigger_hist_cmd(void) { return 0; }
d0bad49b 1671static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
7ef224d1 1672#endif
85f2b082
TZ
1673
1674extern int register_trigger_cmds(void);
1675extern void clear_event_triggers(struct trace_array *tr);
1676
1677struct event_trigger_data {
1678 unsigned long count;
1679 int ref;
1680 struct event_trigger_ops *ops;
1681 struct event_command *cmd_ops;
d8a30f20 1682 struct event_filter __rcu *filter;
85f2b082
TZ
1683 char *filter_str;
1684 void *private_data;
104f2810 1685 bool paused;
db1388b4 1686 bool paused_tmp;
85f2b082 1687 struct list_head list;
db1388b4
TZ
1688 char *name;
1689 struct list_head named_list;
1690 struct event_trigger_data *named_data;
85f2b082
TZ
1691};
1692
d0bad49b
TZ
1693/* Avoid typos */
1694#define ENABLE_EVENT_STR "enable_event"
1695#define DISABLE_EVENT_STR "disable_event"
1696#define ENABLE_HIST_STR "enable_hist"
1697#define DISABLE_HIST_STR "disable_hist"
1698
1699struct enable_trigger_data {
1700 struct trace_event_file *file;
1701 bool enable;
1702 bool hist;
1703};
1704
1705extern int event_enable_trigger_print(struct seq_file *m,
1706 struct event_trigger_ops *ops,
1707 struct event_trigger_data *data);
1708extern void event_enable_trigger_free(struct event_trigger_ops *ops,
1709 struct event_trigger_data *data);
1710extern int event_enable_trigger_func(struct event_command *cmd_ops,
1711 struct trace_event_file *file,
1712 char *glob, char *cmd, char *param);
1713extern int event_enable_register_trigger(char *glob,
1714 struct event_trigger_ops *ops,
1715 struct event_trigger_data *data,
1716 struct trace_event_file *file);
1717extern void event_enable_unregister_trigger(char *glob,
1718 struct event_trigger_ops *ops,
1719 struct event_trigger_data *test,
1720 struct trace_event_file *file);
ab4bf008
TZ
1721extern void trigger_data_free(struct event_trigger_data *data);
1722extern int event_trigger_init(struct event_trigger_ops *ops,
1723 struct event_trigger_data *data);
1724extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1725 int trigger_enable);
1726extern void update_cond_flag(struct trace_event_file *file);
ab4bf008
TZ
1727extern int set_trigger_filter(char *filter_str,
1728 struct event_trigger_data *trigger_data,
1729 struct trace_event_file *file);
db1388b4
TZ
1730extern struct event_trigger_data *find_named_trigger(const char *name);
1731extern bool is_named_trigger(struct event_trigger_data *test);
1732extern int save_named_trigger(const char *name,
1733 struct event_trigger_data *data);
1734extern void del_named_trigger(struct event_trigger_data *data);
1735extern void pause_named_trigger(struct event_trigger_data *data);
1736extern void unpause_named_trigger(struct event_trigger_data *data);
1737extern void set_named_trigger_data(struct event_trigger_data *data,
1738 struct event_trigger_data *named_data);
067fe038
TZ
1739extern struct event_trigger_data *
1740get_named_trigger_data(struct event_trigger_data *data);
ab4bf008 1741extern int register_event_command(struct event_command *cmd);
d0bad49b
TZ
1742extern int unregister_event_command(struct event_command *cmd);
1743extern int register_trigger_hist_enable_disable_cmds(void);
ab4bf008 1744
85f2b082
TZ
1745/**
1746 * struct event_trigger_ops - callbacks for trace event triggers
1747 *
1748 * The methods in this structure provide per-event trigger hooks for
1749 * various trigger operations.
1750 *
1751 * All the methods below, except for @init() and @free(), must be
1752 * implemented.
1753 *
1754 * @func: The trigger 'probe' function called when the triggering
1755 * event occurs. The data passed into this callback is the data
1756 * that was supplied to the event_command @reg() function that
c4a59230
TZ
1757 * registered the trigger (see struct event_command) along with
1758 * the trace record, rec.
85f2b082
TZ
1759 *
1760 * @init: An optional initialization function called for the trigger
1761 * when the trigger is registered (via the event_command reg()
1762 * function). This can be used to perform per-trigger
1763 * initialization, such as incrementing a per-trigger reference
1764 * count. This is usually implemented by the
1765 * generic utility function @event_trigger_init() (see
1766 * trace_event_triggers.c).
1767 *
1768 * @free: An optional de-initialization function called for the
1769 * trigger when the trigger is unregistered (via the
1770 * event_command @unreg() function). This can be used to perform
1771 * per-trigger de-initialization, such as decrementing a
1772 * per-trigger reference count and freeing corresponding trigger
1773 * data. This is usually implemented by the
1774 * generic utility function @event_trigger_free() (see
1775 * trace_event_triggers.c).
1776 *
1777 * @print: The callback function invoked to have the trigger print
1778 * itself. This is usually implemented by a wrapper function
1779 * that calls the generic utility function @event_trigger_print()
1780 * (see trace_event_triggers.c).
1781 */
1782struct event_trigger_ops {
c4a59230 1783 void (*func)(struct event_trigger_data *data,
1ac4f51c
TZ
1784 void *rec,
1785 struct ring_buffer_event *rbe);
85f2b082
TZ
1786 int (*init)(struct event_trigger_ops *ops,
1787 struct event_trigger_data *data);
1788 void (*free)(struct event_trigger_ops *ops,
1789 struct event_trigger_data *data);
1790 int (*print)(struct seq_file *m,
1791 struct event_trigger_ops *ops,
1792 struct event_trigger_data *data);
1793};
1794
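/*
 * Hypothetical example (not in-tree): an event_trigger_ops implementation
 * for a trigger that logs a message for up to 'count' hits of an event
 * (count == -1 meaning unlimited).  It reuses event_trigger_init() and
 * trigger_data_free(), both declared earlier in this header; the rest is
 * a sketch of the callback shapes only, not a working trigger.
 */
static void example_trigger(struct event_trigger_data *data, void *rec,
			    struct ring_buffer_event *rbe)
{
	if (!data->count)
		return;
	if (data->count != -1)
		(data->count)--;

	pr_info("example trigger hit\n");
}

static int example_trigger_print(struct seq_file *m,
				 struct event_trigger_ops *ops,
				 struct event_trigger_data *data)
{
	seq_puts(m, "example");
	if (data->count != -1)
		seq_printf(m, ":count=%lu", data->count);
	seq_putc(m, '\n');
	return 0;
}

static void example_trigger_free(struct event_trigger_ops *ops,
				 struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;
	if (!--data->ref)
		trigger_data_free(data);
}

static struct event_trigger_ops example_trigger_ops = {
	.func	= example_trigger,
	.print	= example_trigger_print,
	.init	= event_trigger_init,	/* generic refcount helper */
	.free	= example_trigger_free,
};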
1795/**
1796 * struct event_command - callbacks and data members for event commands
1797 *
1798 * Event commands are invoked by users by writing the command name
1799 * into the 'trigger' file associated with a trace event. The
1800 * parameters associated with a specific invocation of an event
1801 * command are used to create an event trigger instance, which is
1802 * added to the list of trigger instances associated with that trace
1803 * event. When the event is hit, the set of triggers associated with
1804 * that event is invoked.
1805 *
1806 * The data members in this structure provide per-event command data
1807 * for various event commands.
1808 *
1809 * All the data members below, except for @flags, must be set
1810 * for each event command.
1811 *
1812 * @name: The unique name that identifies the event command. This is
1813 * the name used when setting triggers via trigger files.
1814 *
1815 * @trigger_type: A unique id that identifies the event command
1816 * 'type'. This value has two purposes: the first is to ensure that
1817 * only one trigger of the same type can be set at a given time
1818 * for a particular event, e.g. it doesn't make sense to have both
1819 * a traceon and traceoff trigger attached to a single event at
1820 * the same time, so traceon and traceoff have the same type
1821 * though they have different names. The @trigger_type value is
1822 * also used as a bit value for deferring the actual trigger
1823 * action until after the current event is finished. Some
1824 * commands need to do this if they themselves log to the trace
1825 * buffer (see the EVENT_CMD_FL_POST_TRIGGER flag below). @trigger_type
1826 * values are defined by adding new values to the trigger_type
af658dca 1827 * enum in include/linux/trace_events.h.
85f2b082 1828 *
353206f5 1829 * @flags: See the enum event_command_flags below.
a5863dae 1830 *
a88e1cfb
TZ
1831 * All the methods below, except for @set_filter() and @unreg_all(),
1832 * must be implemented.
85f2b082
TZ
1833 *
1834 * @func: The callback function responsible for parsing and
1835 * registering the trigger written to the 'trigger' file by the
1836 * user. It allocates the trigger instance and registers it with
1837 * the appropriate trace event. It makes use of the other
1838 * event_command callback functions to orchestrate this, and is
1839 * usually implemented by the generic utility function
1840 * @event_trigger_callback() (see trace_event_triggers.c).
1841 *
1842 * @reg: Adds the trigger to the list of triggers associated with the
1843 * event, and enables the event trigger itself, after
1844 * initializing it (via the event_trigger_ops @init() function).
1845 * This is also where commands can use the @trigger_type value to
1846 * make the decision as to whether or not multiple instances of
1847 * the trigger should be allowed. This is usually implemented by
1848 * the generic utility function @register_trigger() (see
1849 * trace_event_triggers.c).
1850 *
1851 * @unreg: Removes the trigger from the list of triggers associated
1852 * with the event, and disables the event trigger itself, after
1853 * de-initializing it (via the event_trigger_ops @free() function).
1854 * This is usually implemented by the generic utility function
1855 * @unregister_trigger() (see trace_event_triggers.c).
1856 *
a88e1cfb
TZ
1857 * @unreg_all: An optional function called to remove all the triggers
1858 * from the list of triggers associated with the event. Called
1859 * when a trigger file is opened in truncate mode.
1860 *
85f2b082
TZ
1861 * @set_filter: An optional function called to parse and set a filter
1862 * for the trigger. If no @set_filter() method is set for the
1863 * event command, filters set by the user for the command will be
1864 * ignored. This is usually implemented by the generic utility
1865 * function @set_trigger_filter() (see trace_event_triggers.c).
1866 *
1867 * @get_trigger_ops: The callback function invoked to retrieve the
1868 * event_trigger_ops implementation associated with the command.
1869 */
1870struct event_command {
1871 struct list_head list;
1872 char *name;
1873 enum event_trigger_type trigger_type;
353206f5 1874 int flags;
85f2b082 1875 int (*func)(struct event_command *cmd_ops,
7f1d2f82 1876 struct trace_event_file *file,
85f2b082
TZ
1877 char *glob, char *cmd, char *params);
1878 int (*reg)(char *glob,
1879 struct event_trigger_ops *ops,
1880 struct event_trigger_data *data,
7f1d2f82 1881 struct trace_event_file *file);
85f2b082
TZ
1882 void (*unreg)(char *glob,
1883 struct event_trigger_ops *ops,
1884 struct event_trigger_data *data,
7f1d2f82 1885 struct trace_event_file *file);
a88e1cfb 1886 void (*unreg_all)(struct trace_event_file *file);
85f2b082
TZ
1887 int (*set_filter)(char *filter_str,
1888 struct event_trigger_data *data,
7f1d2f82 1889 struct trace_event_file *file);
85f2b082
TZ
1890 struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1891};
1892
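/*
 * Continuing the hypothetical example: wiring the ops above into an
 * event_command and registering it.  example_trigger_func(), the reg/unreg
 * stubs and the "example" command itself are all invented; the in-tree
 * commands use the generic helpers in trace_events_trigger.c
 * (event_trigger_callback(), register_trigger(), ...) for these roles.
 */
static int example_trigger_func(struct event_command *cmd_ops,
				struct trace_event_file *file,
				char *glob, char *cmd, char *param)
{
	/* Parsing and trigger allocation elided in this sketch */
	return -ENOSYS;
}

static int example_register_trigger(char *glob, struct event_trigger_ops *ops,
				    struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	/* Would add 'data' to file->triggers and enable the trigger */
	return -ENOSYS;
}

static void example_unregister_trigger(char *glob, struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct trace_event_file *file)
{
	/* Would remove 'data' from file->triggers and disable the trigger */
}

static struct event_trigger_ops *
example_get_trigger_ops(char *cmd, char *param)
{
	return &example_trigger_ops;
}

static struct event_command example_cmd = {
	.name			= "example",
	.trigger_type		= ETT_TRACE_ONOFF, /* placeholder; a real command defines its own ETT_* bit */
	.func			= example_trigger_func,
	.reg			= example_register_trigger,
	.unreg			= example_unregister_trigger,
	.set_filter		= set_trigger_filter,	/* declared earlier in this header */
	.get_trigger_ops	= example_get_trigger_ops,
};

static __init int register_example_cmd(void)
{
	return register_event_command(&example_cmd);
}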
353206f5
SRRH
1893/**
1894 * enum event_command_flags - flags for struct event_command
1895 *
1896 * @POST_TRIGGER: A flag that says whether or not this command needs
1897 * to have its action delayed until after the current event has
1898 * been closed. Some triggers need to avoid being invoked while
1899 * an event is currently in the process of being logged, since
1900 * the trigger may itself log data into the trace buffer. Thus
1901 * we make sure the current event is committed before invoking
1902 * those triggers. To do that, the trigger invocation is split
1903 * in two - the first part checks the filter using the current
1904 * trace record; if a command has the @post_trigger flag set, it
1905 * sets a bit for itself in the return value, otherwise it
1906 * directly invokes the trigger. Once all commands have been
1907 * either invoked or set their return flag, the current record is
1908 * either committed or discarded. At that point, if any commands
1909 * have deferred their triggers, those commands are finally
1910 * invoked following the close of the current event. In other
1911 * words, if the event_trigger_ops @func() probe implementation
1912 * itself logs to the trace buffer, this flag should be set,
1913 * otherwise it can be left unspecified.
1914 *
1915 * @NEEDS_REC: A flag that says whether or not this command needs
1916 * access to the trace record in order to perform its function,
1917 * regardless of whether or not it has a filter associated with
1918 * it (filters make a trigger require access to the trace record
1919 * but are not always present).
1920 */
1921enum event_command_flags {
1922 EVENT_CMD_FL_POST_TRIGGER = 1,
1923 EVENT_CMD_FL_NEEDS_REC = 2,
1924};
1925
1926static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1927{
1928 return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1929}
1930
1931static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1932{
1933 return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1934}
1935
7f1d2f82 1936extern int trace_event_enable_disable(struct trace_event_file *file,
85f2b082 1937 int enable, int soft_disable);
93e31ffb 1938extern int tracing_alloc_snapshot(void);
a35873a0
TZ
1939extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
1940extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
1941
1942extern int tracing_snapshot_cond_disable(struct trace_array *tr);
1943extern void *tracing_cond_snapshot_data(struct trace_array *tr);
85f2b082 1944
e9fb2b6d
SR
1945extern const char *__start___trace_bprintk_fmt[];
1946extern const char *__stop___trace_bprintk_fmt[];
1947
102c9323
SRRH
1948extern const char *__start___tracepoint_str[];
1949extern const char *__stop___tracepoint_str[];
1950
b9f9108c 1951void trace_printk_control(bool enabled);
81698831 1952void trace_printk_start_comm(void);
613f04a0 1953int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
2b6080f2 1954int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
07d777fe 1955
5c3469cb
MH
1956/* Used from boot time tracer */
1957extern int trace_set_options(struct trace_array *tr, char *option);
1958extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
1959extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
1960 unsigned long size, int cpu_id);
1961extern int tracing_set_cpumask(struct trace_array *tr,
1962 cpumask_var_t tracing_cpumask_new);
1963
1964
7e465baa
TZ
1965#define MAX_EVENT_NAME_LEN 64
1966
1967extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
1968extern ssize_t trace_parse_run_command(struct file *file,
1969 const char __user *buffer, size_t count, loff_t *ppos,
1970 int (*createfn)(int, char**));
1971
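/*
 * Sketch of the createfn shape trace_run_command()/trace_parse_run_command()
 * expect: the written line is split into whitespace-separated argv[] words
 * and passed here.  The command handled below is invented for illustration;
 * real users such as the kprobe and synthetic event code do considerably
 * more parsing.
 */
static int example_create_cmd(int argc, char **argv)
{
	if (argc < 1)
		return -EINVAL;

	if (argv[0][0] == '-')
		return -ENOSYS;	/* deletion not handled in this sketch */

	pr_info("would create '%s' with %d argument(s)\n", argv[0], argc - 1);
	return 0;
}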
8a062902 1972extern unsigned int err_pos(char *cmd, const char *str);
2f754e77
SRV
1973extern void tracing_log_err(struct trace_array *tr,
1974 const char *loc, const char *cmd,
8a062902
TZ
1975 const char **errs, u8 type, u8 pos);
1976
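/*
 * Loose sketch of how a command parser might report a malformed command
 * through err_pos()/tracing_log_err(); the error table, the "example"
 * location string and the offending token are all invented.  The type
 * argument indexes the errs array, and pos places the caret shown in the
 * instance's error_log file.
 */
static const char *example_errs[] = {
	"Duplicate name",	/* index 0 */
	"Unknown keyword",	/* index 1 */
};

static void example_report_error(struct trace_array *tr, char *cmd)
{
	tracing_log_err(tr, "example", cmd, example_errs,
			1, err_pos(cmd, "bogus"));
}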
ca268da6
SRRH
1977/*
1978 * Normal trace_printk() and friends allocate special buffers
1979 * to do the manipulation, as well as save the print formats
1980 * into sections to display. But the trace infrastructure wants
1981 * to use these without the added overhead at the price of being
1982 * a bit slower (used mainly for warnings, where we don't care
1983 * about performance). The internal_trace_puts() is for such
1984 * a purpose.
1985 */
1986#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
1987
4e5292ea 1988#undef FTRACE_ENTRY
04ae87a5 1989#define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
2425bcb9 1990 extern struct trace_event_call \
52f5684c 1991 __aligned(4) event_##call;
4e5292ea 1992#undef FTRACE_ENTRY_DUP
04ae87a5
PZ
1993#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \
1994 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
a4a551b8 1995#undef FTRACE_ENTRY_PACKED
04ae87a5
PZ
1996#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \
1997 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
a4a551b8 1998
4e5292ea 1999#include "trace_entries.h"
e1112b4d 2000
6e48b550 2001#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
2425bcb9 2002int perf_ftrace_event_register(struct trace_event_call *call,
ced39002
JO
2003 enum trace_reg type, void *data);
2004#else
2005#define perf_ftrace_event_register NULL
6e48b550 2006#endif
ced39002 2007
5f893b26
SRRH
2008#ifdef CONFIG_FTRACE_SYSCALLS
2009void init_ftrace_syscalls(void);
dbfeaa7a 2010const char *get_syscall_name(int syscall);
5f893b26
SRRH
2011#else
2012static inline void init_ftrace_syscalls(void) { }
dbfeaa7a
TZ
2013static inline const char *get_syscall_name(int syscall)
2014{
2015 return NULL;
2016}
5f893b26
SRRH
2017#endif
2018
2019#ifdef CONFIG_EVENT_TRACING
2020void trace_event_init(void);
f57a4143 2021void trace_event_eval_update(struct trace_eval_map **map, int len);
5c3469cb
MH
2022/* Used from boot time tracer */
2023extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
2024extern int trigger_process_regex(struct trace_event_file *file, char *buff);
5f893b26
SRRH
2025#else
2026static inline void __init trace_event_init(void) { }
f57a4143 2027static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
5f893b26
SRRH
2028#endif
2029
2824f503
SRV
2030#ifdef CONFIG_TRACER_SNAPSHOT
2031void tracing_snapshot_instance(struct trace_array *tr);
2032int tracing_alloc_snapshot_instance(struct trace_array *tr);
2033#else
2034static inline void tracing_snapshot_instance(struct trace_array *tr) { }
2035static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
2036{
2037 return 0;
2038}
2039#endif
2040
3f1756dc
SRV
2041#ifdef CONFIG_PREEMPT_TRACER
2042void tracer_preempt_on(unsigned long a0, unsigned long a1);
2043void tracer_preempt_off(unsigned long a0, unsigned long a1);
2044#else
2045static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
2046static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
2047#endif
2048#ifdef CONFIG_IRQSOFF_TRACER
2049void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
2050void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
2051#else
2052static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
2053static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
2054#endif
2055
0daa2302 2056extern struct trace_iterator *tracepoint_print_iter;
5f893b26 2057
0c97bf86
MO
2058/*
2059 * Reset the state of the trace_iterator so that it can read consumed data.
2060 * Normally, the trace_iterator is used for reading the data when it is not
2061 * consumed, and must retain state.
2062 */
2063static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
2064{
2065 const size_t offset = offsetof(struct trace_iterator, seq);
2066
2067 /*
2068 * Keep gcc from complaining about overwriting more than just one
2069 * member in the structure.
2070 */
2071 memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
2072
2073 iter->pos = -1;
2074}
2075
bc0c38d1 2076#endif /* _LINUX_KERNEL_TRACE_H */