kernel/trace/trace.h  (blame view, mirror_ubuntu-bionic-kernel.git, at "ftrace: Protect ftrace_graph_hash with ftrace_sync")
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
bac5fb97 2
bc0c38d1
SR
3#ifndef _LINUX_KERNEL_TRACE_H
4#define _LINUX_KERNEL_TRACE_H
5
6#include <linux/fs.h>
60063497 7#include <linux/atomic.h>
bc0c38d1
SR
8#include <linux/sched.h>
9#include <linux/clocksource.h>
3928a8a2 10#include <linux/ring_buffer.h>
bd8ac686 11#include <linux/mmiotrace.h>
4e5292ea 12#include <linux/tracepoint.h>
d13744cd 13#include <linux/ftrace.h>
24f1e32c 14#include <linux/hw_breakpoint.h>
9504504c 15#include <linux/trace_seq.h>
af658dca 16#include <linux/trace_events.h>
52f5684c 17#include <linux/compiler.h>
60f1d5e3 19#include <linux/glob.h>
9504504c 20
12ab74ee
SR
21#ifdef CONFIG_FTRACE_SYSCALLS
22#include <asm/unistd.h> /* For NR_SYSCALLS */
23#include <asm/syscall.h> /* some archs define it here */
24#endif
25
72829bc3
TG
26enum trace_type {
27 __TRACE_FIRST_TYPE = 0,
28
29 TRACE_FN,
30 TRACE_CTX,
31 TRACE_WAKE,
32 TRACE_STACK,
dd0e545f 33 TRACE_PRINT,
48ead020 34 TRACE_BPRINT,
bd8ac686
PP
35 TRACE_MMIO_RW,
36 TRACE_MMIO_MAP,
9f029e83 37 TRACE_BRANCH,
287b6e68
FW
38 TRACE_GRAPH_RET,
39 TRACE_GRAPH_ENT,
02b67518 40 TRACE_USER_STACK,
c71a8961 41 TRACE_BLK,
09ae7234 42 TRACE_BPUTS,
e7c15cd8 43 TRACE_HWLAT,
fa32e855 44 TRACE_RAW_DATA,
72829bc3 45
f0868d1e 46 __TRACE_LAST_TYPE,
72829bc3
TG
47};
48
bc0c38d1 49
0a1c49db
SR
50#undef __field
51#define __field(type, item) type item;
86387f7e 52
d7315094
SR
53#undef __field_struct
54#define __field_struct(type, item) __field(type, item)
86387f7e 55
d7315094
SR
56#undef __field_desc
57#define __field_desc(type, container, item)
02b67518 58
0a1c49db
SR
59#undef __array
60#define __array(type, item, size) type item[size];
1427cdf0 61
d7315094
SR
62#undef __array_desc
63#define __array_desc(type, container, item, size)
777e208d 64
0a1c49db
SR
65#undef __dynamic_array
66#define __dynamic_array(type, item) type item[];
777e208d 67
0a1c49db
SR
68#undef F_STRUCT
69#define F_STRUCT(args...) args
74239072 70
0a1c49db 71#undef FTRACE_ENTRY
02aa3162
JO
72#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
73 struct struct_name { \
74 struct trace_entry ent; \
75 tstruct \
0a1c49db 76 }
777e208d 77
0a1c49db 78#undef FTRACE_ENTRY_DUP
02aa3162 79#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
1e9b51c2 80
e59a0bff 81#undef FTRACE_ENTRY_REG
02aa3162
JO
82#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
83 filter, regfn) \
84 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
85 filter)
e59a0bff 86
a4a551b8
NK
87#undef FTRACE_ENTRY_PACKED
88#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print, \
89 filter) \
90 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
91 filter) __packed
92
0a1c49db 93#include "trace_entries.h"
36994e58 94
/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
bed1ffca
FW
99struct syscall_trace_enter {
100 struct trace_entry ent;
101 int nr;
102 unsigned long args[];
103};
104
105struct syscall_trace_exit {
106 struct trace_entry ent;
107 int nr;
99df5a6a 108 long ret;
bed1ffca
FW
109};
110
93ccae7a 111struct kprobe_trace_entry_head {
413d37d1
MH
112 struct trace_entry ent;
113 unsigned long ip;
413d37d1
MH
114};
115
93ccae7a 116struct kretprobe_trace_entry_head {
413d37d1
MH
117 struct trace_entry ent;
118 unsigned long func;
119 unsigned long ret_ip;
413d37d1
MH
120};
121
fc5e27ae
PP
122/*
123 * trace_flag_type is an enumeration that holds different
124 * states when a trace occurs. These are:
9244489a 125 * IRQS_OFF - interrupts were disabled
9de36825 126 * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
bd9cfca9 127 * NEED_RESCHED - reschedule is requested
9244489a
SR
128 * HARDIRQ - inside an interrupt handler
129 * SOFTIRQ - inside a softirq handler
fc5e27ae
PP
130 */
131enum trace_flag_type {
132 TRACE_FLAG_IRQS_OFF = 0x01,
9244489a
SR
133 TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
134 TRACE_FLAG_NEED_RESCHED = 0x04,
135 TRACE_FLAG_HARDIRQ = 0x08,
136 TRACE_FLAG_SOFTIRQ = 0x10,
e5137b50 137 TRACE_FLAG_PREEMPT_RESCHED = 0x20,
7e6867bf 138 TRACE_FLAG_NMI = 0x40,
fc5e27ae
PP
139};
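
/*
 * Illustrative sketch (not part of the original header): output code can
 * test these bits on the flags field of a trace_entry.  The helper below is
 * hypothetical; trace_print_lat_fmt() in trace_output.c is the in-tree
 * consumer of these flags.
 */
static inline bool trace_entry_in_hardirq(struct trace_entry *ent)
{
	return ent->flags & TRACE_FLAG_HARDIRQ;
}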
140
5bf9a1ee 141#define TRACE_BUF_SIZE 1024
bc0c38d1 142
2b6080f2
SR
143struct trace_array;
144
bc0c38d1
SR
145/*
146 * The CPU trace array - it consists of thousands of trace entries
147 * plus some other descriptor data: (for example which task started
148 * the trace, etc.)
149 */
150struct trace_array_cpu {
bc0c38d1 151 atomic_t disabled;
2cadf913 152 void *buffer_page; /* ring buffer spare */
4e3c3333 153
438ced17 154 unsigned long entries;
bc0c38d1
SR
155 unsigned long saved_latency;
156 unsigned long critical_start;
157 unsigned long critical_end;
158 unsigned long critical_sequence;
159 unsigned long nice;
160 unsigned long policy;
161 unsigned long rt_priority;
2f26ebd5 162 unsigned long skipped_entries;
a5a1d1c2 163 u64 preempt_timestamp;
bc0c38d1 164 pid_t pid;
d20b92ab 165 kuid_t uid;
bc0c38d1 166 char comm[TASK_COMM_LEN];
3fdaf80f
SRRH
167
168 bool ignore_pid;
345ddcc8
SRRH
169#ifdef CONFIG_FUNCTION_TRACER
170 bool ftrace_ignore_pid;
171#endif
bc0c38d1
SR
172};
173
2b6080f2 174struct tracer;
37aea98b 175struct trace_option_dentry;
2b6080f2 176
12883efb
SRRH
177struct trace_buffer {
178 struct trace_array *tr;
179 struct ring_buffer *buffer;
180 struct trace_array_cpu __percpu *data;
a5a1d1c2 181 u64 time_start;
12883efb
SRRH
182 int cpu;
183};
184
9a38a885
SRRH
185#define TRACE_FLAGS_MAX_SIZE 32
186
37aea98b
SRRH
187struct trace_options {
188 struct tracer *tracer;
189 struct trace_option_dentry *topts;
190};
191
49090107 192struct trace_pid_list {
f4d34a87
SR
193 int pid_max;
194 unsigned long *pids;
49090107
SRRH
195};
196
bc0c38d1
SR
197/*
198 * The trace array - an array of per-CPU trace arrays. This is the
199 * highest level data structure that individual tracers deal with.
200 * They have on/off state as well:
201 */
202struct trace_array {
ae63b31e 203 struct list_head list;
277ba044 204 char *name;
12883efb
SRRH
205 struct trace_buffer trace_buffer;
206#ifdef CONFIG_TRACER_MAX_TRACE
207 /*
208 * The max_buffer is used to snapshot the trace when a maximum
209 * latency is reached, or when the user initiates a snapshot.
210 * Some tracers will use this to store a maximum trace while
211 * it continues examining live traces.
212 *
213 * The buffers for the max_buffer are set up the same as the trace_buffer
214 * When a snapshot is taken, the buffer of the max_buffer is swapped
215 * with the buffer of the trace_buffer and the buffers are reset for
216 * the trace_buffer so the tracing can continue.
217 */
218 struct trace_buffer max_buffer;
45ad21ca 219 bool allocated_snapshot;
f971cc9a
SRRH
220#endif
221#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6d9b3fa5 222 unsigned long max_latency;
12883efb 223#endif
49090107 224 struct trace_pid_list __rcu *filtered_pids;
0b9b12c1
SRRH
225 /*
226 * max_lock is used to protect the swapping of buffers
227 * when taking a max snapshot. The buffers themselves are
228 * protected by per_cpu spinlocks. But the action of the swap
229 * needs its own lock.
230 *
231 * This is defined as a arch_spinlock_t in order to help
232 * with performance when lockdep debugging is enabled.
233 *
234 * It is also used in other places outside the update_max_tr
235 * so it needs to be defined outside of the
236 * CONFIG_TRACER_MAX_TRACE.
237 */
238 arch_spinlock_t max_lock;
499e5470 239 int buffer_disabled;
12ab74ee
SR
240#ifdef CONFIG_FTRACE_SYSCALLS
241 int sys_refcount_enter;
242 int sys_refcount_exit;
7f1d2f82
SRRH
243 struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
244 struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
12ab74ee 245#endif
2b6080f2
SR
246 int stop_count;
247 int clock_id;
37aea98b 248 int nr_topts;
065e63f9 249 bool clear_trace;
2b6080f2 250 struct tracer *current_trace;
983f938a 251 unsigned int trace_flags;
9a38a885 252 unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
ae63b31e 253 unsigned int flags;
2b6080f2 254 raw_spinlock_t start_lock;
ae63b31e 255 struct dentry *dir;
2b6080f2
SR
256 struct dentry *options;
257 struct dentry *percpu_dir;
ae63b31e 258 struct dentry *event_dir;
37aea98b 259 struct trace_options *topts;
ae63b31e
SR
260 struct list_head systems;
261 struct list_head events;
ccfe9e42 262 cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
a695cb58 263 int ref;
f20a5806
SRRH
264#ifdef CONFIG_FUNCTION_TRACER
265 struct ftrace_ops *ops;
345ddcc8 266 struct trace_pid_list __rcu *function_pids;
04ec7bb6 267#ifdef CONFIG_DYNAMIC_FTRACE
673feb9d 268 /* All of these are protected by the ftrace_lock */
04ec7bb6 269 struct list_head func_probes;
673feb9d
SRV
270 struct list_head mod_trace;
271 struct list_head mod_notrace;
04ec7bb6 272#endif
f20a5806
SRRH
273 /* function tracing enabled */
274 int function_enabled;
275#endif
bc0c38d1
SR
276};
277
ae63b31e
SR
278enum {
279 TRACE_ARRAY_FL_GLOBAL = (1 << 0)
280};
281
282extern struct list_head ftrace_trace_arrays;
283
a8227415
AL
284extern struct mutex trace_types_lock;
285
8e2e2fa4
SRRH
286extern int trace_array_get(struct trace_array *tr);
287extern void trace_array_put(struct trace_array *tr);
288
ae63b31e
SR
289/*
290 * The global tracer (top) should be the first trace array added,
291 * but we check the flag anyway.
292 */
293static inline struct trace_array *top_trace_array(void)
294{
295 struct trace_array *tr;
296
da9c3413 297 if (list_empty(&ftrace_trace_arrays))
dc81e5e3
YY
298 return NULL;
299
ae63b31e
SR
300 tr = list_entry(ftrace_trace_arrays.prev,
301 typeof(*tr), list);
302 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
303 return tr;
304}
305
7104f300
SR
306#define FTRACE_CMP_TYPE(var, type) \
307 __builtin_types_compatible_p(typeof(var), type *)
308
309#undef IF_ASSIGN
310#define IF_ASSIGN(var, entry, etype, id) \
311 if (FTRACE_CMP_TYPE(var, etype)) { \
312 var = (typeof(var))(entry); \
313 WARN_ON(id && (entry)->type != id); \
314 break; \
315 }
316
317/* Will cause compile errors if type is not found. */
318extern void __ftrace_bad_type(void);
319
320/*
321 * The trace_assign_type is a verifier that the entry type is
322 * the same as the type being assigned. To add new types simply
323 * add a line with the following format:
324 *
325 * IF_ASSIGN(var, ent, type, id);
326 *
327 * Where "type" is the trace type that includes the trace_entry
328 * as the "ent" item. And "id" is the trace identifier that is
329 * used in the trace_type enum.
330 *
331 * If the type can have more than one id, then use zero.
332 */
333#define trace_assign_type(var, ent) \
334 do { \
335 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
336 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
7104f300 337 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
02b67518 338 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
7104f300 339 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
48ead020 340 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
09ae7234 341 IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
e7c15cd8 342 IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \
fa32e855 343 IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
7104f300
SR
344 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
345 TRACE_MMIO_RW); \
346 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
347 TRACE_MMIO_MAP); \
9f029e83 348 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
287b6e68
FW
349 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
350 TRACE_GRAPH_ENT); \
351 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
352 TRACE_GRAPH_RET); \
7104f300
SR
353 __ftrace_bad_type(); \
354 } while (0)
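
/*
 * Illustrative sketch (not part of the original header): a typical use of
 * trace_assign_type() from a print callback.  The helper below is
 * hypothetical; the real users live in trace_output.c and the tracer
 * plugins.
 */
static inline unsigned long example_fn_entry_ip(struct trace_entry *ent)
{
	struct ftrace_entry *field;

	trace_assign_type(field, ent);	/* WARNs if ent->type != TRACE_FN */
	return field->ip;
}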
2c4f035f 355
adf9f195
FW
356/*
357 * An option specific to a tracer. This is a boolean value.
358 * The bit is the bit index that sets its value on the
359 * flags value in struct tracer_flags.
360 */
361struct tracer_opt {
9de36825
IM
362 const char *name; /* Will appear on the trace_options file */
363 u32 bit; /* Mask assigned in val field in tracer_flags */
adf9f195
FW
364};
365
366/*
367 * The set of specific options for a tracer. Your tracer
368 * have to set the initial value of the flags val.
369 */
370struct tracer_flags {
371 u32 val;
9de36825 372 struct tracer_opt *opts;
d39cdd20 373 struct tracer *trace;
adf9f195
FW
374};
375
376/* Makes more easy to define a tracer opt */
377#define TRACER_OPT(s, b) .name = #s, .bit = b
378
034939b6 379
41d9c0be
SRRH
380struct trace_option_dentry {
381 struct tracer_opt *opt;
382 struct tracer_flags *flags;
383 struct trace_array *tr;
384 struct dentry *entry;
385};
386
6eaaa5d5 387/**
8434dc93 388 * struct tracer - a specific tracer and its callbacks to interact with tracefs
6eaaa5d5
FW
389 * @name: the name chosen to select it on the available_tracers file
390 * @init: called when one switches to this tracer (echo name > current_tracer)
391 * @reset: called when one switches to another tracer
05a724bd
CH
392 * @start: called when tracing is unpaused (echo 1 > tracing_on)
393 * @stop: called when tracing is paused (echo 0 > tracing_on)
6508fa76 394 * @update_thresh: called when tracing_thresh is updated
6eaaa5d5
FW
395 * @open: called when the trace file is opened
396 * @pipe_open: called when the trace_pipe file is opened
6eaaa5d5 397 * @close: called when the trace file is released
c521efd1 398 * @pipe_close: called when the trace_pipe file is released
6eaaa5d5
FW
399 * @read: override the default read callback on trace_pipe
400 * @splice_read: override the default splice_read callback on trace_pipe
401 * @selftest: selftest to run on boot (see trace_selftest.c)
402 * @print_headers: override the first lines that describe your columns
403 * @print_line: callback that prints a trace
404 * @set_flag: signals one of your private flags changed (trace_options file)
405 * @flags: your private flags
bc0c38d1
SR
406 */
407struct tracer {
408 const char *name;
1c80025a 409 int (*init)(struct trace_array *tr);
bc0c38d1 410 void (*reset)(struct trace_array *tr);
9036990d
SR
411 void (*start)(struct trace_array *tr);
412 void (*stop)(struct trace_array *tr);
6508fa76 413 int (*update_thresh)(struct trace_array *tr);
bc0c38d1 414 void (*open)(struct trace_iterator *iter);
107bad8b 415 void (*pipe_open)(struct trace_iterator *iter);
bc0c38d1 416 void (*close)(struct trace_iterator *iter);
c521efd1 417 void (*pipe_close)(struct trace_iterator *iter);
107bad8b
SR
418 ssize_t (*read)(struct trace_iterator *iter,
419 struct file *filp, char __user *ubuf,
420 size_t cnt, loff_t *ppos);
3c56819b
EGM
421 ssize_t (*splice_read)(struct trace_iterator *iter,
422 struct file *filp,
423 loff_t *ppos,
424 struct pipe_inode_info *pipe,
425 size_t len,
426 unsigned int flags);
60a11774
SR
427#ifdef CONFIG_FTRACE_STARTUP_TEST
428 int (*selftest)(struct tracer *trace,
429 struct trace_array *tr);
430#endif
8bba1bf5 431 void (*print_header)(struct seq_file *m);
2c4f035f 432 enum print_line_t (*print_line)(struct trace_iterator *iter);
adf9f195 433 /* If you handled the flag setting, return 0 */
8c1a49ae
SRRH
434 int (*set_flag)(struct trace_array *tr,
435 u32 old_flags, u32 bit, int set);
613f04a0 436 /* Return 0 if OK with change, else return non-zero */
bf6065b5 437 int (*flag_changed)(struct trace_array *tr,
613f04a0 438 u32 mask, int set);
bc0c38d1 439 struct tracer *next;
9de36825 440 struct tracer_flags *flags;
50512ab5 441 int enabled;
cf6ab6d9 442 int ref;
f43c738b 443 bool print_max;
607e2ea1 444 bool allow_instances;
12883efb 445#ifdef CONFIG_TRACER_MAX_TRACE
f43c738b 446 bool use_max_tr;
12883efb 447#endif
c7b3ae0b
ZSZ
448 /* True if tracer cannot be enabled in kernel param */
449 bool noboot;
bc0c38d1
SR
450};
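
/*
 * Illustrative sketch (not part of the original header): the minimal shape
 * of a tracer plugin built on struct tracer.  All names below are
 * hypothetical and the block is compile-guarded; trace_nop.c is the
 * smallest in-tree example.
 */
#if 0
static int example_tracer_init(struct trace_array *tr)
{
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int example_tracer_setup(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(example_tracer_setup);
#endif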
451
f9520750 452
e4a3f541 453/* Only current can touch trace_recursion */
e4a3f541 454
/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
e46cbf75 480enum {
567cd4da
SR
481 TRACE_BUFFER_BIT,
482 TRACE_BUFFER_NMI_BIT,
483 TRACE_BUFFER_IRQ_BIT,
484 TRACE_BUFFER_SIRQ_BIT,
485
486 /* Start of function recursion bits */
487 TRACE_FTRACE_BIT,
edc15caf
SR
488 TRACE_FTRACE_NMI_BIT,
489 TRACE_FTRACE_IRQ_BIT,
490 TRACE_FTRACE_SIRQ_BIT,
e46cbf75 491
4104d326 492 /* INTERNAL_BITs must be greater than FTRACE_BITs */
edc15caf
SR
493 TRACE_INTERNAL_BIT,
494 TRACE_INTERNAL_NMI_BIT,
495 TRACE_INTERNAL_IRQ_BIT,
496 TRACE_INTERNAL_SIRQ_BIT,
497
6224beb1 498 TRACE_BRANCH_BIT,
/*
 * Abuse of the trace_recursion.
 * We need a way to maintain state if we are tracing the function graph
 * in irq context, because we want to trace a particular function that
 * was called in irq context but we have irq tracing off. Since this
 * can only be modified by current, we can reuse trace_recursion.
 */
e46cbf75 506 TRACE_IRQ_BIT,
d0d7e894
SRV
507
508 /* Set if the function is in the set_graph_function file */
509 TRACE_GRAPH_BIT,
510
511 /*
512 * In the very unlikely case that an interrupt came in
513 * at a start of graph tracing, and we want to trace
514 * the function in that interrupt, the depth can be greater
515 * than zero, because of the preempted start of a previous
516 * trace. In an even more unlikely case, depth could be 2
517 * if a softirq interrupted the start of graph tracing,
518 * followed by an interrupt preempting a start of graph
519 * tracing in the softirq, and depth can even be 3
520 * if an NMI came in at the start of an interrupt function
521 * that preempted a softirq start of a function that
522 * preempted normal context!!!! Luckily, it can't be
523 * greater than 3, so the next two bits are a mask
524 * of what the depth is when we set TRACE_GRAPH_BIT
525 */
526
527 TRACE_GRAPH_DEPTH_START_BIT,
528 TRACE_GRAPH_DEPTH_END_BIT,
e46cbf75 529};
e4a3f541 530
e46cbf75
SR
531#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
532#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
533#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
e4a3f541 534
d0d7e894
SRV
535#define trace_recursion_depth() \
536 (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
537#define trace_recursion_set_depth(depth) \
538 do { \
539 current->trace_recursion &= \
540 ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
541 current->trace_recursion |= \
542 ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
543 } while (0)
544
edc15caf
SR
545#define TRACE_CONTEXT_BITS 4
546
547#define TRACE_FTRACE_START TRACE_FTRACE_BIT
548#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
549
edc15caf
SR
550#define TRACE_LIST_START TRACE_INTERNAL_BIT
551#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
552
553#define TRACE_CONTEXT_MASK TRACE_LIST_MAX
554
555static __always_inline int trace_get_context_bit(void)
556{
557 int bit;
558
559 if (in_interrupt()) {
560 if (in_nmi())
561 bit = 0;
562
563 else if (in_irq())
564 bit = 1;
565 else
566 bit = 2;
567 } else
568 bit = 3;
569
570 return bit;
571}
572
573static __always_inline int trace_test_and_set_recursion(int start, int max)
574{
575 unsigned int val = current->trace_recursion;
576 int bit;
577
578 /* A previous recursion check was made */
579 if ((val & TRACE_CONTEXT_MASK) > max)
580 return 0;
581
582 bit = trace_get_context_bit() + start;
583 if (unlikely(val & (1 << bit)))
584 return -1;
585
586 val |= 1 << bit;
587 current->trace_recursion = val;
588 barrier();
589
590 return bit;
591}
592
593static __always_inline void trace_clear_recursion(int bit)
594{
595 unsigned int val = current->trace_recursion;
596
597 if (!bit)
598 return;
599
600 bit = 1 << bit;
601 val &= ~bit;
602
603 barrier();
604 current->trace_recursion = val;
605}
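
/*
 * Illustrative sketch (not part of the original header): the standard
 * pairing of the two helpers above, as used by the function tracing
 * callbacks (function_trace_call() in trace_functions.c follows this
 * pattern).  The callback below is hypothetical and compile-guarded.
 */
#if 0
static void example_trace_callback(void)
{
	int bit;

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		return;		/* recursion already active in this context */

	/* ... do the actual tracing work here ... */

	trace_clear_recursion(bit);
}
#endif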
606
6d158a81
SR
607static inline struct ring_buffer_iter *
608trace_buffer_iter(struct trace_iterator *iter, int cpu)
609{
610 if (iter->buffer_iter && iter->buffer_iter[cpu])
611 return iter->buffer_iter[cpu];
612 return NULL;
613}
614
b6f11df2 615int tracer_init(struct tracer *t, struct trace_array *tr);
9036990d 616int tracing_is_enabled(void);
12883efb
SRRH
617void tracing_reset(struct trace_buffer *buf, int cpu);
618void tracing_reset_online_cpus(struct trace_buffer *buf);
9456f0fa 619void tracing_reset_current(int cpu);
873c642f 620void tracing_reset_all_online_cpus(void);
bc0c38d1 621int tracing_open_generic(struct inode *inode, struct file *filp);
2e86421d 622bool tracing_is_disabled(void);
e7c15cd8 623int tracer_tracing_is_on(struct trace_array *tr);
2290f2c5
SRV
624void tracer_tracing_on(struct trace_array *tr);
625void tracer_tracing_off(struct trace_array *tr);
5452af66 626struct dentry *trace_create_file(const char *name,
f4ae40a6 627 umode_t mode,
5452af66
FW
628 struct dentry *parent,
629 void *data,
630 const struct file_operations *fops);
631
bc0c38d1 632struct dentry *tracing_init_dentry(void);
d618b3e6 633
51a763dd
ACM
634struct ring_buffer_event;
635
e77405ad
SR
636struct ring_buffer_event *
637trace_buffer_lock_reserve(struct ring_buffer *buffer,
638 int type,
639 unsigned long len,
640 unsigned long flags,
641 int pc);
51a763dd 642
45dcd8b8
PP
643struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
644 struct trace_array_cpu *data);
c4a8e8be
FW
645
646struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
647 int *ent_cpu, u64 *ent_ts);
648
52ffabe3
SRRH
649void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
650 struct ring_buffer_event *event);
7ffbd48d 651
955b61e5
JW
652int trace_empty(struct trace_iterator *iter);
653
654void *trace_find_next_entry_inc(struct trace_iterator *iter);
655
656void trace_init_global_iter(struct trace_iterator *iter);
657
658void tracing_iter_reset(struct trace_iterator *iter, int cpu);
659
6fb44b71 660void trace_function(struct trace_array *tr,
6fb44b71
SR
661 unsigned long ip,
662 unsigned long parent_ip,
38697053 663 unsigned long flags, int pc);
0a772620
JO
664void trace_graph_function(struct trace_array *tr,
665 unsigned long ip,
666 unsigned long parent_ip,
667 unsigned long flags, int pc);
7e9a49ef 668void trace_latency_header(struct seq_file *m);
62b915f1
JO
669void trace_default_header(struct seq_file *m);
670void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
671int trace_empty(struct trace_iterator *iter);
bc0c38d1 672
287b6e68 673void trace_graph_return(struct ftrace_graph_ret *trace);
e49dc19c 674int trace_graph_entry(struct ftrace_graph_ent *trace);
1a0799a8 675void set_graph_array(struct trace_array *tr);
1e9b51c2 676
41bc8144
SR
677void tracing_start_cmdline_record(void);
678void tracing_stop_cmdline_record(void);
d914ba37
JF
679void tracing_start_tgid_record(void);
680void tracing_stop_tgid_record(void);
681
bc0c38d1 682int register_tracer(struct tracer *type);
b5130b1e 683int is_tracing_stopped(void);
955b61e5 684
098c879e
SRRH
685loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
686
955b61e5
JW
687extern cpumask_var_t __read_mostly tracing_buffer_mask;
688
689#define for_each_tracing_cpu(cpu) \
690 for_each_cpu(cpu, tracing_buffer_mask)
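
/*
 * Illustrative sketch (not part of the original header): iterating only the
 * CPUs in tracing_buffer_mask with the macro above.  The helper is
 * hypothetical and compile-guarded.
 */
#if 0
static void example_reset_traced_cpus(struct trace_array *tr)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		tracing_reset(&tr->trace_buffer, cpu);
}
#endif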
bc0c38d1
SR
691
692extern unsigned long nsecs_to_usecs(unsigned long nsecs);
693
0e950173
TB
694extern unsigned long tracing_thresh;
695
4e267db1 696/* PID filtering */
76c813e2
SRRH
697
698extern int pid_max;
699
4e267db1
SR
700bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
701 pid_t search_pid);
702bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
703 struct task_struct *task);
704void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
705 struct task_struct *self,
706 struct task_struct *task);
5cc8976b
SRRH
707void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
708void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
709int trace_pid_show(struct seq_file *m, void *v);
76c813e2
SRRH
710void trace_free_pid_list(struct trace_pid_list *pid_list);
711int trace_pid_write(struct trace_pid_list *filtered_pids,
712 struct trace_pid_list **new_pid_list,
713 const char __user *ubuf, size_t cnt);
4e267db1 714
5d4a9dba 715#ifdef CONFIG_TRACER_MAX_TRACE
bc0c38d1
SR
716void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
717void update_max_tr_single(struct trace_array *tr,
718 struct task_struct *tsk, int cpu);
5d4a9dba 719#endif /* CONFIG_TRACER_MAX_TRACE */
bc0c38d1 720
c0a0d0d3 721#ifdef CONFIG_STACKTRACE
e77405ad 722void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
c0a0d0d3
FW
723 int pc);
724
725void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
726 int pc);
727#else
e1f7992e 728static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
c0a0d0d3
FW
729 unsigned long flags, int pc)
730{
731}
732
733static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
734 int skip, int pc)
735{
736}
737#endif /* CONFIG_STACKTRACE */
53614991 738
a5a1d1c2 739extern u64 ftrace_now(int cpu);
bc0c38d1 740
4ca53085 741extern void trace_find_cmdline(int pid, char comm[]);
d914ba37 742extern int trace_find_tgid(int pid);
c37775d5 743extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
f7d48cbd 744
bc0c38d1
SR
745#ifdef CONFIG_DYNAMIC_FTRACE
746extern unsigned long ftrace_update_tot_cnt;
04ec7bb6
SRV
747void ftrace_init_trace_array(struct trace_array *tr);
748#else
749static inline void ftrace_init_trace_array(struct trace_array *tr) { }
ad97772a 750#endif
d05cdb25
SR
751#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
752extern int DYN_FTRACE_TEST_NAME(void);
95950c2e
SR
753#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
754extern int DYN_FTRACE_TEST_NAME2(void);
bc0c38d1 755
55034cd6 756extern bool ring_buffer_expanded;
020e5f85
LZ
757extern bool tracing_selftest_disabled;
758
60a11774 759#ifdef CONFIG_FTRACE_STARTUP_TEST
60a11774
SR
760extern int trace_selftest_startup_function(struct tracer *trace,
761 struct trace_array *tr);
7447dce9
FW
762extern int trace_selftest_startup_function_graph(struct tracer *trace,
763 struct trace_array *tr);
60a11774
SR
764extern int trace_selftest_startup_irqsoff(struct tracer *trace,
765 struct trace_array *tr);
60a11774
SR
766extern int trace_selftest_startup_preemptoff(struct tracer *trace,
767 struct trace_array *tr);
60a11774
SR
768extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
769 struct trace_array *tr);
60a11774
SR
770extern int trace_selftest_startup_wakeup(struct tracer *trace,
771 struct trace_array *tr);
fb1b6d8b
SN
772extern int trace_selftest_startup_nop(struct tracer *trace,
773 struct trace_array *tr);
80e5ea45
SR
774extern int trace_selftest_startup_branch(struct tracer *trace,
775 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
781#define __tracer_data __refdata
782#else
783/* Tracers are seldom changed. Optimize when selftests are disabled. */
784#define __tracer_data __read_mostly
60a11774
SR
785#endif /* CONFIG_FTRACE_STARTUP_TEST */
786
c7aafc54 787extern void *head_page(struct trace_array_cpu *data);
a5a1d1c2 788extern unsigned long long ns2usecs(u64 nsec);
1fd8f2a3 789extern int
40ce74f1 790trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
48ead020 791extern int
40ce74f1 792trace_vprintk(unsigned long ip, const char *fmt, va_list args);
659372d3
SR
793extern int
794trace_array_vprintk(struct trace_array *tr,
795 unsigned long ip, const char *fmt, va_list args);
796int trace_array_printk(struct trace_array *tr,
797 unsigned long ip, const char *fmt, ...);
12883efb
SRRH
798int trace_array_printk_buf(struct ring_buffer *buffer,
799 unsigned long ip, const char *fmt, ...);
955b61e5
JW
800void trace_printk_seq(struct trace_seq *s);
801enum print_line_t print_trace_line(struct trace_iterator *iter);
c7aafc54 802
8e1e1df2
BP
803extern char trace_find_mark(unsigned long long duration);
804
673feb9d
SRV
805struct ftrace_hash;
806
807struct ftrace_mod_load {
808 struct list_head list;
809 char *func;
810 char *module;
811 int enable;
812};
813
8c08f0d5
SRV
814enum {
815 FTRACE_HASH_FL_MOD = (1 << 0),
816};
817
4046bf02
NK
818struct ftrace_hash {
819 unsigned long size_bits;
820 struct hlist_head *buckets;
821 unsigned long count;
8c08f0d5 822 unsigned long flags;
4046bf02
NK
823 struct rcu_head rcu;
824};
825
826struct ftrace_func_entry *
827ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
828
eb583cd4 829static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
4046bf02 830{
8c08f0d5 831 return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
4046bf02
NK
832}
833
15e6cb36 834/* Standard output formatting function used for function return traces */
fb52607a 835#ifdef CONFIG_FUNCTION_GRAPH_TRACER
62b915f1
JO
836
837/* Flag options */
838#define TRACE_GRAPH_PRINT_OVERRUN 0x1
839#define TRACE_GRAPH_PRINT_CPU 0x2
840#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
841#define TRACE_GRAPH_PRINT_PROC 0x8
842#define TRACE_GRAPH_PRINT_DURATION 0x10
843#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
ccdb5946 844#define TRACE_GRAPH_PRINT_IRQS 0x40
607e3a29 845#define TRACE_GRAPH_PRINT_TAIL 0x80
55577204
SRRH
846#define TRACE_GRAPH_SLEEP_TIME 0x100
847#define TRACE_GRAPH_GRAPH_TIME 0x200
6fc84ea7
SRRH
848#define TRACE_GRAPH_PRINT_FILL_SHIFT 28
849#define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
62b915f1 850
55577204
SRRH
851extern void ftrace_graph_sleep_time_control(bool enable);
852extern void ftrace_graph_graph_time_control(bool enable);
853
d7a8d9e9
JO
854extern enum print_line_t
855print_graph_function_flags(struct trace_iterator *iter, u32 flags);
856extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
9d9add34 857extern void
0706f1c4 858trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
62b915f1
JO
859extern void graph_trace_open(struct trace_iterator *iter);
860extern void graph_trace_close(struct trace_iterator *iter);
861extern int __trace_graph_entry(struct trace_array *tr,
862 struct ftrace_graph_ent *trace,
863 unsigned long flags, int pc);
864extern void __trace_graph_return(struct trace_array *tr,
865 struct ftrace_graph_ret *trace,
866 unsigned long flags, int pc);
867
ea4e2bc4 868#ifdef CONFIG_DYNAMIC_FTRACE
15d5bb7f 869extern struct ftrace_hash __rcu *ftrace_graph_hash;
b8202bd6 870extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
ea4e2bc4 871
d0d7e894 872static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
ea4e2bc4 873{
d0d7e894 874 unsigned long addr = trace->func;
b9b0c831 875 int ret = 0;
15d5bb7f 876 struct ftrace_hash *hash;
b9b0c831
NK
877
878 preempt_disable_notrace();
879
dc8018c7
SRV
880 /*
881 * Have to open code "rcu_dereference_sched()" because the
882 * function graph tracer can be called when RCU is not
883 * "watching".
108fdfb3 884 * Protected with schedule_on_each_cpu(ftrace_sync)
dc8018c7 885 */
15d5bb7f
AG
886 hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
887
888 if (ftrace_hash_empty(hash)) {
b9b0c831
NK
889 ret = 1;
890 goto out;
ea4e2bc4
SR
891 }
892
15d5bb7f 893 if (ftrace_lookup_ip(hash, addr)) {
d0d7e894
SRV
894
895 /*
896 * This needs to be cleared on the return functions
897 * when the depth is zero.
898 */
899 trace_recursion_set(TRACE_GRAPH_BIT);
900 trace_recursion_set_depth(trace->depth);
901
b9b0c831
NK
902 /*
903 * If no irqs are to be traced, but a set_graph_function
904 * is set, and called by an interrupt handler, we still
905 * want to trace it.
906 */
907 if (in_irq())
908 trace_recursion_set(TRACE_IRQ_BIT);
909 else
910 trace_recursion_clear(TRACE_IRQ_BIT);
911 ret = 1;
912 }
913
914out:
915 preempt_enable_notrace();
916 return ret;
ea4e2bc4 917}
29ad23b0 918
d0d7e894
SRV
919static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
920{
921 if (trace_recursion_test(TRACE_GRAPH_BIT) &&
922 trace->depth == trace_recursion_depth())
923 trace_recursion_clear(TRACE_GRAPH_BIT);
924}
925
29ad23b0
NK
926static inline int ftrace_graph_notrace_addr(unsigned long addr)
927{
b9b0c831 928 int ret = 0;
b8202bd6 929 struct ftrace_hash *notrace_hash;
29ad23b0 930
b9b0c831 931 preempt_disable_notrace();
29ad23b0 932
dc8018c7
SRV
933 /*
934 * Have to open code "rcu_dereference_sched()" because the
935 * function graph tracer can be called when RCU is not
936 * "watching".
108fdfb3 937 * Protected with schedule_on_each_cpu(ftrace_sync)
dc8018c7 938 */
b8202bd6
AG
939 notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
940 !preemptible());
941
942 if (ftrace_lookup_ip(notrace_hash, addr))
b9b0c831 943 ret = 1;
29ad23b0 944
b9b0c831
NK
945 preempt_enable_notrace();
946 return ret;
29ad23b0 947}
15e6cb36 948#else
d0d7e894 949static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
6b253930
IM
950{
951 return 1;
ea4e2bc4 952}
29ad23b0
NK
953
954static inline int ftrace_graph_notrace_addr(unsigned long addr)
955{
956 return 0;
957}
d0d7e894
SRV
958static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
959{ }
ea4e2bc4 960#endif /* CONFIG_DYNAMIC_FTRACE */
1a414428
SRRH
961
962extern unsigned int fgraph_max_depth;
963
964static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
965{
966 /* trace it when it is-nested-in or is a function enabled. */
d0d7e894
SRV
967 return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
968 ftrace_graph_addr(trace)) ||
1a414428
SRRH
969 (trace->depth < 0) ||
970 (fgraph_max_depth && trace->depth >= fgraph_max_depth);
971}
972
ea4e2bc4 973#else /* CONFIG_FUNCTION_GRAPH_TRACER */
15e6cb36 974static inline enum print_line_t
d7a8d9e9 975print_graph_function_flags(struct trace_iterator *iter, u32 flags)
15e6cb36
FW
976{
977 return TRACE_TYPE_UNHANDLED;
978}
ea4e2bc4 979#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
15e6cb36 980
756d17ee 981extern struct list_head ftrace_pids;
804a6851 982
1155de47 983#ifdef CONFIG_FUNCTION_TRACER
92a68fa0
SRV
984struct ftrace_func_command {
985 struct list_head list;
986 char *name;
04ec7bb6
SRV
987 int (*func)(struct trace_array *tr,
988 struct ftrace_hash *hash,
92a68fa0
SRV
989 char *func, char *cmd,
990 char *params, int enable);
991};
f1ed7c74 992extern bool ftrace_filter_param __initdata;
345ddcc8 993static inline int ftrace_trace_task(struct trace_array *tr)
804a6851 994{
345ddcc8 995 return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
804a6851 996}
e0a413f6 997extern int ftrace_is_dead(void);
591dffda
SRRH
998int ftrace_create_function_files(struct trace_array *tr,
999 struct dentry *parent);
1000void ftrace_destroy_function_files(struct trace_array *tr);
4104d326
SRRH
1001void ftrace_init_global_array_ops(struct trace_array *tr);
1002void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
1003void ftrace_reset_array_ops(struct trace_array *tr);
7eea4fce 1004int using_ftrace_ops_list_func(void);
345ddcc8 1005void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
501c2375
SRRH
1006void ftrace_init_tracefs_toplevel(struct trace_array *tr,
1007 struct dentry *d_tracer);
d879d0b8 1008void ftrace_clear_pids(struct trace_array *tr);
dbeafd0d 1009int init_function_trace(void);
1e10486f 1010void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
1155de47 1011#else
345ddcc8 1012static inline int ftrace_trace_task(struct trace_array *tr)
1155de47
PM
1013{
1014 return 1;
1015}
e0a413f6 1016static inline int ftrace_is_dead(void) { return 0; }
591dffda
SRRH
1017static inline int
1018ftrace_create_function_files(struct trace_array *tr,
1019 struct dentry *parent)
1020{
1021 return 0;
1022}
1023static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
4104d326
SRRH
1024static inline __init void
1025ftrace_init_global_array_ops(struct trace_array *tr) { }
1026static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
345ddcc8 1027static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
501c2375 1028static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
d879d0b8 1029static inline void ftrace_clear_pids(struct trace_array *tr) { }
dbeafd0d 1030static inline int init_function_trace(void) { return 0; }
1e10486f 1031static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
4104d326
SRRH
/* ftrace_func_t type is not defined, use macro instead of static inline */
1033#define ftrace_init_array_ops(tr, func) do { } while (0)
591dffda
SRRH
1034#endif /* CONFIG_FUNCTION_TRACER */
1035
1036#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
ec19b859
SRV
1037
1038struct ftrace_probe_ops {
1039 void (*func)(unsigned long ip,
1040 unsigned long parent_ip,
b5f081b5 1041 struct trace_array *tr,
bca6c8d0 1042 struct ftrace_probe_ops *ops,
6e444319 1043 void *data);
ec19b859 1044 int (*init)(struct ftrace_probe_ops *ops,
b5f081b5 1045 struct trace_array *tr,
6e444319
SRV
1046 unsigned long ip, void *init_data,
1047 void **data);
ec19b859 1048 void (*free)(struct ftrace_probe_ops *ops,
b5f081b5 1049 struct trace_array *tr,
6e444319 1050 unsigned long ip, void *data);
ec19b859
SRV
1051 int (*print)(struct seq_file *m,
1052 unsigned long ip,
1053 struct ftrace_probe_ops *ops,
1054 void *data);
1055};
1056
41794f19
SRV
1057struct ftrace_func_mapper;
1058typedef int (*ftrace_mapper_func)(void *data);
1059
1060struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
1061void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
1062 unsigned long ip);
1063int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
1064 unsigned long ip, void *data);
1065void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
1066 unsigned long ip);
1067void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
1068 ftrace_mapper_func free_func);
1069
ec19b859 1070extern int
04ec7bb6
SRV
1071register_ftrace_function_probe(char *glob, struct trace_array *tr,
1072 struct ftrace_probe_ops *ops, void *data);
d3d532d7 1073extern int
7b60f3d8
SRV
1074unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
1075 struct ftrace_probe_ops *ops);
a0e6369e 1076extern void clear_ftrace_function_probes(struct trace_array *tr);
ec19b859 1077
92a68fa0
SRV
1078int register_ftrace_command(struct ftrace_func_command *cmd);
1079int unregister_ftrace_command(struct ftrace_func_command *cmd);
1080
591dffda
SRRH
1081void ftrace_create_filter_files(struct ftrace_ops *ops,
1082 struct dentry *parent);
1083void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1084#else
92a68fa0
SRV
1085struct ftrace_func_command;
1086
1087static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
1088{
1089 return -EINVAL;
1090}
1091static inline __init int unregister_ftrace_command(char *cmd_name)
1092{
1093 return -EINVAL;
1094}
8a49f3e0
SRV
1095static inline void clear_ftrace_function_probes(struct trace_array *tr)
1096{
1097}
1098
591dffda
SRRH
1099/*
1100 * The ops parameter passed in is usually undefined.
1101 * This must be a macro.
1102 */
1103#define ftrace_create_filter_files(ops, parent) do { } while (0)
1104#define ftrace_destroy_filter_files(ops) do { } while (0)
1105#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
804a6851 1106
c6650b2e 1107bool ftrace_event_is_function(struct trace_event_call *call);
ced39002 1108
/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
1116struct trace_parser {
1117 bool cont;
1118 char *buffer;
1119 unsigned idx;
1120 unsigned size;
1121};
1122
1123static inline bool trace_parser_loaded(struct trace_parser *parser)
1124{
1125 return (parser->idx != 0);
1126}
1127
1128static inline bool trace_parser_cont(struct trace_parser *parser)
1129{
1130 return parser->cont;
1131}
1132
1133static inline void trace_parser_clear(struct trace_parser *parser)
1134{
1135 parser->cont = false;
1136 parser->idx = 0;
1137}
1138
1139extern int trace_parser_get_init(struct trace_parser *parser, int size);
1140extern void trace_parser_put(struct trace_parser *parser);
1141extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1142 size_t cnt, loff_t *ppos);
1143
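/*
 * Illustrative sketch (not part of the original header): the usual write()
 * handler pattern built on the parser API above.  The function name is
 * hypothetical and the block is compile-guarded; ftrace_regex_write() in
 * ftrace.c follows this shape.
 */
#if 0
static ssize_t example_filter_write(struct file *filp, const char __user *ubuf,
				    size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t ret;

	if (trace_parser_get_init(&parser, PAGE_SIZE))
		return -ENOMEM;

	ret = trace_get_user(&parser, ubuf, cnt, ppos);
	if (ret >= 0 && trace_parser_loaded(&parser) &&
	    !trace_parser_cont(&parser)) {
		/* parser.buffer now holds one NUL-terminated token */
		pr_debug("token: %s\n", parser.buffer);
	}

	trace_parser_put(&parser);
	return ret;
}
#endif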
729358da
SRRH
1144/*
1145 * Only create function graph options if function graph is configured.
1146 */
1147#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1148# define FGRAPH_FLAGS \
729358da 1149 C(DISPLAY_GRAPH, "display-graph"),
729358da
SRRH
1150#else
1151# define FGRAPH_FLAGS
729358da
SRRH
1152#endif
1153
4ee4301c
SRRH
1154#ifdef CONFIG_BRANCH_TRACER
1155# define BRANCH_FLAGS \
1156 C(BRANCH, "branch"),
1157#else
1158# define BRANCH_FLAGS
1159#endif
1160
8179e8a1
SRRH
1161#ifdef CONFIG_FUNCTION_TRACER
1162# define FUNCTION_FLAGS \
1e10486f
NK
1163 C(FUNCTION, "function-trace"), \
1164 C(FUNC_FORK, "function-fork"),
8179e8a1
SRRH
1165# define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
1166#else
1167# define FUNCTION_FLAGS
1168# define FUNCTION_DEFAULT_FLAGS 0UL
1e10486f 1169# define TRACE_ITER_FUNC_FORK 0UL
8179e8a1
SRRH
1170#endif
1171
73dddbb5
SRRH
1172#ifdef CONFIG_STACKTRACE
1173# define STACK_FLAGS \
1174 C(STACKTRACE, "stacktrace"),
1175#else
1176# define STACK_FLAGS
1177#endif
1178
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 * trace.c (this macro guarantees it).
 */
a3418a36
SRRH
1186#define TRACE_FLAGS \
1187 C(PRINT_PARENT, "print-parent"), \
1188 C(SYM_OFFSET, "sym-offset"), \
1189 C(SYM_ADDR, "sym-addr"), \
1190 C(VERBOSE, "verbose"), \
1191 C(RAW, "raw"), \
1192 C(HEX, "hex"), \
1193 C(BIN, "bin"), \
1194 C(BLOCK, "block"), \
a3418a36 1195 C(PRINTK, "trace_printk"), \
a3418a36
SRRH
1196 C(ANNOTATE, "annotate"), \
1197 C(USERSTACKTRACE, "userstacktrace"), \
1198 C(SYM_USEROBJ, "sym-userobj"), \
1199 C(PRINTK_MSGONLY, "printk-msg-only"), \
1200 C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \
1201 C(LATENCY_FMT, "latency-format"), \
a3418a36 1202 C(RECORD_CMD, "record-cmd"), \
d914ba37 1203 C(RECORD_TGID, "record-tgid"), \
a3418a36
SRRH
1204 C(OVERWRITE, "overwrite"), \
1205 C(STOP_ON_FREE, "disable_on_free"), \
1206 C(IRQ_INFO, "irq-info"), \
1207 C(MARKERS, "markers"), \
c37775d5 1208 C(EVENT_FORK, "event-fork"), \
8179e8a1 1209 FUNCTION_FLAGS \
4ee4301c 1210 FGRAPH_FLAGS \
73dddbb5 1211 STACK_FLAGS \
4ee4301c 1212 BRANCH_FLAGS
ce3fed62 1213
a3418a36
SRRH
1214/*
1215 * By defining C, we can make TRACE_FLAGS a list of bit names
1216 * that will define the bits for the flag masks.
1217 */
1218#undef C
1219#define C(a, b) TRACE_ITER_##a##_BIT
1220
b5e87c05
SRRH
1221enum trace_iterator_bits {
1222 TRACE_FLAGS
1223 /* Make sure we don't go more than we have bits for */
1224 TRACE_ITER_LAST_BIT
1225};
a3418a36
SRRH
1226
1227/*
1228 * By redefining C, we can make TRACE_FLAGS a list of masks that
1229 * use the bits as defined above.
1230 */
1231#undef C
1232#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
1233
1234enum trace_iterator_flags { TRACE_FLAGS };
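
/*
 * Illustrative expansion (not part of the original header): with the two
 * definitions of C() above, the first TRACE_FLAGS entry expands roughly to
 *
 *	enum trace_iterator_bits  { TRACE_ITER_PRINT_PARENT_BIT, ... };
 *	enum trace_iterator_flags { TRACE_ITER_PRINT_PARENT =
 *				    (1 << TRACE_ITER_PRINT_PARENT_BIT), ... };
 *
 * so every option string in the trace_options array in trace.c gets a
 * matching _BIT index and mask without listing the names twice.
 */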
4e655519 1235
15e6cb36
FW
1236/*
1237 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1238 * control the output of kernel symbols.
1239 */
1240#define TRACE_ITER_SYM_MASK \
1241 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1242
43a15386
FW
1243extern struct tracer nop_trace;
1244
2ed84eeb 1245#ifdef CONFIG_BRANCH_TRACER
9f029e83
SR
1246extern int enable_branch_tracing(struct trace_array *tr);
1247extern void disable_branch_tracing(void);
1248static inline int trace_branch_enable(struct trace_array *tr)
52f232cb 1249{
983f938a 1250 if (tr->trace_flags & TRACE_ITER_BRANCH)
9f029e83 1251 return enable_branch_tracing(tr);
52f232cb
SR
1252 return 0;
1253}
9f029e83 1254static inline void trace_branch_disable(void)
52f232cb
SR
1255{
1256 /* due to races, always disable */
9f029e83 1257 disable_branch_tracing();
52f232cb
SR
1258}
1259#else
9f029e83 1260static inline int trace_branch_enable(struct trace_array *tr)
52f232cb
SR
1261{
1262 return 0;
1263}
9f029e83 1264static inline void trace_branch_disable(void)
52f232cb
SR
1265{
1266}
2ed84eeb 1267#endif /* CONFIG_BRANCH_TRACER */
52f232cb 1268
1852fcce
SR
1269/* set ring buffers to default size if not already done so */
1270int tracing_update_buffers(void);
1271
cf027f64
TZ
1272struct ftrace_event_field {
1273 struct list_head link;
92edca07
SR
1274 const char *name;
1275 const char *type;
aa38e9fc 1276 int filter_type;
cf027f64
TZ
1277 int offset;
1278 int size;
a118e4d1 1279 int is_signed;
cf027f64
TZ
1280};
1281
30e673b2 1282struct event_filter {
c9c53ca0
SR
1283 int n_preds; /* Number assigned */
1284 int a_preds; /* allocated */
f86f4180
CZ
1285 struct filter_pred __rcu *preds;
1286 struct filter_pred __rcu *root;
1287 char *filter_string;
30e673b2
TZ
1288};
1289
cfb180f3
TZ
1290struct event_subsystem {
1291 struct list_head list;
1292 const char *name;
1f9963cb 1293 struct event_filter *filter;
e9dbfae5 1294 int ref_count;
cfb180f3
TZ
1295};
1296
7967b3e0 1297struct trace_subsystem_dir {
ae63b31e
SR
1298 struct list_head list;
1299 struct event_subsystem *subsystem;
1300 struct trace_array *tr;
1301 struct dentry *entry;
1302 int ref_count;
1303 int nr_events;
1304};
1305
65da9a0a
SRRH
1306extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1307 struct ring_buffer *buffer,
1308 struct ring_buffer_event *event);
fa66ddb8 1309
fa66ddb8
SRRH
1310void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1311 struct ring_buffer *buffer,
1312 struct ring_buffer_event *event,
1313 unsigned long flags, int pc,
1314 struct pt_regs *regs);
33fddff2
SRRH
1315
1316static inline void trace_buffer_unlock_commit(struct trace_array *tr,
1317 struct ring_buffer *buffer,
1318 struct ring_buffer_event *event,
1319 unsigned long flags, int pc)
1320{
1321 trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
1322}
1323
0fc1b09f
SRRH
1324DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1325DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1326void trace_buffered_event_disable(void);
1327void trace_buffered_event_enable(void);
1328
1329static inline void
1330__trace_event_discard_commit(struct ring_buffer *buffer,
1331 struct ring_buffer_event *event)
1332{
1333 if (this_cpu_read(trace_buffered_event) == event) {
1334 /* Simply release the temp buffer */
1335 this_cpu_dec(trace_buffered_event_cnt);
1336 return;
1337 }
1338 ring_buffer_discard_commit(buffer, event);
1339}
1340
/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
1354static inline bool
1355__event_trigger_test_discard(struct trace_event_file *file,
1356 struct ring_buffer *buffer,
1357 struct ring_buffer_event *event,
1358 void *entry,
1359 enum event_trigger_type *tt)
1360{
1361 unsigned long eflags = file->flags;
1362
1363 if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1364 *tt = event_triggers_call(file, entry);
1365
9cbb1506
SRRH
1366 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
1367 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
1368 !filter_match_preds(file->filter, entry))) {
0fc1b09f 1369 __trace_event_discard_commit(buffer, event);
9cbb1506
SRRH
1370 return true;
1371 }
dad56ee7 1372
9cbb1506 1373 return false;
dad56ee7
SRRH
1374}
1375
/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * checks whether the event is soft disabled and should be discarded.
 */
1389static inline void
1390event_trigger_unlock_commit(struct trace_event_file *file,
1391 struct ring_buffer *buffer,
1392 struct ring_buffer_event *event,
1393 void *entry, unsigned long irq_flags, int pc)
1394{
1395 enum event_trigger_type tt = ETT_NONE;
1396
1397 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1398 trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
1399
1400 if (tt)
1401 event_triggers_post_call(file, tt, entry);
1402}
1403
/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 * @regs: The pt_regs captured at the start of the event
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * checks whether the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
1420static inline void
1421event_trigger_unlock_commit_regs(struct trace_event_file *file,
1422 struct ring_buffer *buffer,
1423 struct ring_buffer_event *event,
1424 void *entry, unsigned long irq_flags, int pc,
1425 struct pt_regs *regs)
1426{
1427 enum event_trigger_type tt = ETT_NONE;
1428
1429 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1430 trace_buffer_unlock_commit_regs(file->tr, buffer, event,
1431 irq_flags, pc, regs);
1432
1433 if (tt)
1434 event_triggers_post_call(file, tt, entry);
1435}
1436
61e9dea2
SR
1437#define FILTER_PRED_INVALID ((unsigned short)-1)
1438#define FILTER_PRED_IS_RIGHT (1 << 15)
43cd4145 1439#define FILTER_PRED_FOLD (1 << 15)
61e9dea2 1440
bf93f9ed
SR
1441/*
1442 * The max preds is the size of unsigned short with
1443 * two flags at the MSBs. One bit is used for both the IS_RIGHT
1444 * and FOLD flags. The other is reserved.
1445 *
1446 * 2^14 preds is way more than enough.
1447 */
1448#define MAX_FILTER_PRED 16384
4a3d27e9 1449
7ce7e424 1450struct filter_pred;
1889d209 1451struct regex;
7ce7e424 1452
58d9a597 1453typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
7ce7e424 1454
1889d209
FW
1455typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1456
3f6fe06d 1457enum regex_type {
b0f1a59a 1458 MATCH_FULL = 0,
3f6fe06d
FW
1459 MATCH_FRONT_ONLY,
1460 MATCH_MIDDLE_ONLY,
1461 MATCH_END_ONLY,
60f1d5e3 1462 MATCH_GLOB,
3f6fe06d
FW
1463};
1464
1889d209
FW
1465struct regex {
1466 char pattern[MAX_FILTER_STR_VAL];
1467 int len;
1468 int field_len;
1469 regex_match_func match;
1470};
1471
7ce7e424 1472struct filter_pred {
1889d209
FW
1473 filter_pred_fn_t fn;
1474 u64 val;
1475 struct regex regex;
61aaef55 1476 unsigned short *ops;
1d0e78e3 1477 struct ftrace_event_field *field;
1889d209
FW
1478 int offset;
1479 int not;
1480 int op;
61e9dea2
SR
1481 unsigned short index;
1482 unsigned short parent;
1483 unsigned short left;
1484 unsigned short right;
7ce7e424
TZ
1485};
1486
4ef56902
TZ
1487static inline bool is_string_field(struct ftrace_event_field *field)
1488{
1489 return field->filter_type == FILTER_DYN_STRING ||
1490 field->filter_type == FILTER_STATIC_STRING ||
4c738413
SRV
1491 field->filter_type == FILTER_PTR_STRING ||
1492 field->filter_type == FILTER_COMM;
4ef56902
TZ
1493}
1494
1495static inline bool is_function_field(struct ftrace_event_field *field)
1496{
1497 return field->filter_type == FILTER_TRACE_FN;
1498}
1499
3f6fe06d
FW
1500extern enum regex_type
1501filter_parse_regex(char *buff, int len, char **search, int *not);
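
/*
 * Illustrative sketch (not part of the original header): how a caller
 * typically classifies a user-supplied pattern with filter_parse_regex().
 * The function below is hypothetical and compile-guarded; ftrace_match()
 * in ftrace.c is an in-tree user of the same pattern.
 */
#if 0
static bool example_pattern_is_glob(char *pattern, int len)
{
	char *search;
	int not;

	return filter_parse_regex(pattern, len, &search, &not) == MATCH_GLOB;
}
#endif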
7f1d2f82 1502extern void print_event_filter(struct trace_event_file *file,
4bda2d51 1503 struct trace_seq *s);
7f1d2f82 1504extern int apply_event_filter(struct trace_event_file *file,
8b372562 1505 char *filter_string);
7967b3e0 1506extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
8b372562
TZ
1507 char *filter_string);
1508extern void print_subsystem_event_filter(struct event_subsystem *system,
ac1adc55 1509 struct trace_seq *s);
aa38e9fc 1510extern int filter_assign_type(const char *type);
2425bcb9 1511extern int create_event_filter(struct trace_event_call *call,
bac5fb97
TZ
1512 char *filter_str, bool set_str,
1513 struct event_filter **filterp);
1514extern void free_event_filter(struct event_filter *filter);
7ce7e424 1515
b3a8c6fd 1516struct ftrace_event_field *
2425bcb9 1517trace_find_event_field(struct trace_event_call *call, char *name);
2e33af02 1518
e870e9a1 1519extern void trace_event_enable_cmd_record(bool enable);
d914ba37
JF
1520extern void trace_event_enable_tgid_record(bool enable);
1521
277ba044 1522extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
0c8916c3 1523extern int event_trace_del_tracer(struct trace_array *tr);
e870e9a1 1524
7f1d2f82
SRRH
1525extern struct trace_event_file *find_event_file(struct trace_array *tr,
1526 const char *system,
1527 const char *event);
7862ad18 1528
85f2b082
TZ
1529static inline void *event_file_data(struct file *filp)
1530{
6aa7de05 1531 return READ_ONCE(file_inode(filp)->i_private);
85f2b082
TZ
1532}
1533
20c8928a 1534extern struct mutex event_mutex;
a59fd602 1535extern struct list_head ftrace_events;
ac199db0 1536
85f2b082 1537extern const struct file_operations event_trigger_fops;
7ef224d1
TZ
1538extern const struct file_operations event_hist_fops;
1539
1540#ifdef CONFIG_HIST_TRIGGERS
1541extern int register_trigger_hist_cmd(void);
d0bad49b 1542extern int register_trigger_hist_enable_disable_cmds(void);
7ef224d1
TZ
1543#else
1544static inline int register_trigger_hist_cmd(void) { return 0; }
d0bad49b 1545static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
7ef224d1 1546#endif
85f2b082
TZ
1547
1548extern int register_trigger_cmds(void);
1549extern void clear_event_triggers(struct trace_array *tr);
1550
1551struct event_trigger_data {
1552 unsigned long count;
1553 int ref;
1554 struct event_trigger_ops *ops;
1555 struct event_command *cmd_ops;
d8a30f20 1556 struct event_filter __rcu *filter;
85f2b082
TZ
1557 char *filter_str;
1558 void *private_data;
104f2810 1559 bool paused;
db1388b4 1560 bool paused_tmp;
85f2b082 1561 struct list_head list;
db1388b4
TZ
1562 char *name;
1563 struct list_head named_list;
1564 struct event_trigger_data *named_data;
85f2b082
TZ
1565};
1566
d0bad49b
TZ
1567/* Avoid typos */
1568#define ENABLE_EVENT_STR "enable_event"
1569#define DISABLE_EVENT_STR "disable_event"
1570#define ENABLE_HIST_STR "enable_hist"
1571#define DISABLE_HIST_STR "disable_hist"
1572
1573struct enable_trigger_data {
1574 struct trace_event_file *file;
1575 bool enable;
1576 bool hist;
1577};
1578
1579extern int event_enable_trigger_print(struct seq_file *m,
1580 struct event_trigger_ops *ops,
1581 struct event_trigger_data *data);
1582extern void event_enable_trigger_free(struct event_trigger_ops *ops,
1583 struct event_trigger_data *data);
1584extern int event_enable_trigger_func(struct event_command *cmd_ops,
1585 struct trace_event_file *file,
1586 char *glob, char *cmd, char *param);
1587extern int event_enable_register_trigger(char *glob,
1588 struct event_trigger_ops *ops,
1589 struct event_trigger_data *data,
1590 struct trace_event_file *file);
1591extern void event_enable_unregister_trigger(char *glob,
1592 struct event_trigger_ops *ops,
1593 struct event_trigger_data *test,
1594 struct trace_event_file *file);
ab4bf008
TZ
1595extern void trigger_data_free(struct event_trigger_data *data);
1596extern int event_trigger_init(struct event_trigger_ops *ops,
1597 struct event_trigger_data *data);
1598extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1599 int trigger_enable);
1600extern void update_cond_flag(struct trace_event_file *file);
1601extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
1602 struct event_trigger_data *test,
1603 struct trace_event_file *file);
1604extern int set_trigger_filter(char *filter_str,
1605 struct event_trigger_data *trigger_data,
1606 struct trace_event_file *file);
db1388b4
TZ
1607extern struct event_trigger_data *find_named_trigger(const char *name);
1608extern bool is_named_trigger(struct event_trigger_data *test);
1609extern int save_named_trigger(const char *name,
1610 struct event_trigger_data *data);
1611extern void del_named_trigger(struct event_trigger_data *data);
1612extern void pause_named_trigger(struct event_trigger_data *data);
1613extern void unpause_named_trigger(struct event_trigger_data *data);
1614extern void set_named_trigger_data(struct event_trigger_data *data,
1615 struct event_trigger_data *named_data);
ab4bf008 1616extern int register_event_command(struct event_command *cmd);
d0bad49b
TZ
1617extern int unregister_event_command(struct event_command *cmd);
1618extern int register_trigger_hist_enable_disable_cmds(void);
ab4bf008 1619
85f2b082
TZ
1620/**
1621 * struct event_trigger_ops - callbacks for trace event triggers
1622 *
1623 * The methods in this structure provide per-event trigger hooks for
1624 * various trigger operations.
1625 *
1626 * All the methods below, except for @init() and @free(), must be
1627 * implemented.
1628 *
1629 * @func: The trigger 'probe' function called when the triggering
1630 * event occurs. The data passed into this callback is the data
1631 * that was supplied to the event_command @reg() function that
c4a59230
TZ
1632 * registered the trigger (see struct event_command) along with
1633 * the trace record, rec.
85f2b082
TZ
1634 *
1635 * @init: An optional initialization function called for the trigger
1636 * when the trigger is registered (via the event_command reg()
1637 * function). This can be used to perform per-trigger
1638 * initialization such as incrementing a per-trigger reference
1639 * count, for instance. This is usually implemented by the
1640 * generic utility function @event_trigger_init() (see
1641 * trace_events_trigger.c).
1642 *
1643 * @free: An optional de-initialization function called for the
1644 * trigger when the trigger is unregistered (via the
1645 * event_command @reg() function). This can be used to perform
1646 * per-trigger de-initialization such as decrementing a
1647 * per-trigger reference count and freeing corresponding trigger
1648 * data, for instance. This is usually implemented by the
1649 * generic utility function @event_trigger_free() (see
1650 * trace_events_trigger.c).
1651 *
1652 * @print: The callback function invoked to have the trigger print
1653 * itself. This is usually implemented by a wrapper function
1654 * that calls the generic utility function @event_trigger_print()
1655 * (see trace_events_trigger.c).
1656 */
1657struct event_trigger_ops {
c4a59230
TZ
1658 void (*func)(struct event_trigger_data *data,
1659 void *rec);
85f2b082
TZ
1660 int (*init)(struct event_trigger_ops *ops,
1661 struct event_trigger_data *data);
1662 void (*free)(struct event_trigger_ops *ops,
1663 struct event_trigger_data *data);
1664 int (*print)(struct seq_file *m,
1665 struct event_trigger_ops *ops,
1666 struct event_trigger_data *data);
1667};
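/*
 * Illustrative sketch of a trivial trigger wired up with the hooks
 * documented above (hypothetical example_* names; the real triggers
 * live in trace_events_trigger.c).  @init reuses the generic
 * event_trigger_init() declared earlier in this header; @free is
 * optional and is simply left unset here.
 */
static void example_trigger(struct event_trigger_data *data, void *rec)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	pr_info("example trigger fired\n");
}

static int example_trigger_print(struct seq_file *m,
				 struct event_trigger_ops *ops,
				 struct event_trigger_data *data)
{
	seq_puts(m, "example");
	if (data->count != -1)
		seq_printf(m, ":count=%ld", data->count);
	if (data->filter_str)
		seq_printf(m, " if %s", data->filter_str);
	seq_putc(m, '\n');
	return 0;
}

static struct event_trigger_ops example_trigger_ops = {
	.func	= example_trigger,
	.print	= example_trigger_print,
	.init	= event_trigger_init,
};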
1668
1669/**
1670 * struct event_command - callbacks and data members for event commands
1671 *
1672 * Event commands are invoked by users by writing the command name
1673 * into the 'trigger' file associated with a trace event. The
1674 * parameters associated with a specific invocation of an event
1675 * command are used to create an event trigger instance, which is
1676 * added to the list of trigger instances associated with that trace
1677 * event. When the event is hit, the set of triggers associated with
1678 * that event is invoked.
1679 *
1680 * The data members in this structure provide per-event command data
1681 * for various event commands.
1682 *
1683 * All the data members below, except for @post_trigger, must be set
1684 * for each event command.
1685 *
1686 * @name: The unique name that identifies the event command. This is
1687 * the name used when setting triggers via trigger files.
1688 *
1689 * @trigger_type: A unique id that identifies the event command
1690 * 'type'. This value has two purposes: the first is to ensure that
1691 * only one trigger of the same type can be set at a given time
1692 * for a particular event; e.g. it doesn't make sense to have both
1693 * a traceon and traceoff trigger attached to a single event at
1694 * the same time, so traceon and traceoff have the same type
1695 * though they have different names. The @trigger_type value is
1696 * also used as a bit value for deferring the actual trigger
1697 * action until after the current event is finished. Some
1698 * commands need to do this if they themselves log to the trace
1699 * buffer (see the @post_trigger() member below). @trigger_type
1700 * values are defined by adding new values to the trigger_type
af658dca 1701 * enum in include/linux/trace_events.h.
85f2b082 1702 *
353206f5 1703 * @flags: See the enum event_command_flags below.
a5863dae 1704 *
a88e1cfb
TZ
1705 * All the methods below, except for @set_filter() and @unreg_all(),
1706 * must be implemented.
85f2b082
TZ
1707 *
1708 * @func: The callback function responsible for parsing and
1709 * registering the trigger written to the 'trigger' file by the
1710 * user. It allocates the trigger instance and registers it with
1711 * the appropriate trace event. It makes use of the other
1712 * event_command callback functions to orchestrate this, and is
1713 * usually implemented by the generic utility function
1714 * @event_trigger_callback() (see trace_events_trigger.c).
1715 *
1716 * @reg: Adds the trigger to the list of triggers associated with the
1717 * event, and enables the event trigger itself, after
1718 * initializing it (via the event_trigger_ops @init() function).
1719 * This is also where commands can use the @trigger_type value to
1720 * make the decision as to whether or not multiple instances of
1721 * the trigger should be allowed. This is usually implemented by
1722 * the generic utility function @register_trigger() (see
1723 * trace_events_trigger.c).
1724 *
1725 * @unreg: Removes the trigger from the list of triggers associated
1726 * with the event, and disables the event trigger itself, after
1727 * de-initializing it (via the event_trigger_ops @free() function).
1728 * This is usually implemented by the generic utility function
1729 * @unregister_trigger() (see trace_events_trigger.c).
1730 *
a88e1cfb
TZ
1731 * @unreg_all: An optional function called to remove all the triggers
1732 * from the list of triggers associated with the event. Called
1733 * when a trigger file is opened in truncate mode.
1734 *
85f2b082
TZ
1735 * @set_filter: An optional function called to parse and set a filter
1736 * for the trigger. If no @set_filter() method is set for the
1737 * event command, filters set by the user for the command will be
1738 * ignored. This is usually implemented by the generic utility
1739 * function @set_trigger_filter() (see trace_events_trigger.c).
1740 *
1741 * @get_trigger_ops: The callback function invoked to retrieve the
1742 * event_trigger_ops implementation associated with the command.
1743 */
1744struct event_command {
1745 struct list_head list;
1746 char *name;
1747 enum event_trigger_type trigger_type;
353206f5 1748 int flags;
85f2b082 1749 int (*func)(struct event_command *cmd_ops,
7f1d2f82 1750 struct trace_event_file *file,
85f2b082
TZ
1751 char *glob, char *cmd, char *params);
1752 int (*reg)(char *glob,
1753 struct event_trigger_ops *ops,
1754 struct event_trigger_data *data,
7f1d2f82 1755 struct trace_event_file *file);
85f2b082
TZ
1756 void (*unreg)(char *glob,
1757 struct event_trigger_ops *ops,
1758 struct event_trigger_data *data,
7f1d2f82 1759 struct trace_event_file *file);
a88e1cfb 1760 void (*unreg_all)(struct trace_event_file *file);
85f2b082
TZ
1761 int (*set_filter)(char *filter_str,
1762 struct event_trigger_data *data,
7f1d2f82 1763 struct trace_event_file *file);
85f2b082
TZ
1764 struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1765};
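/*
 * Illustrative sketch of a complete (if simplistic) event_command,
 * using hypothetical example_* names; it assumes the usual kzalloc()
 * and errno definitions are visible.  The real commands in
 * trace_events_trigger.c plug generic helpers into the same slots;
 * here only set_trigger_filter() and unregister_trigger(), declared
 * earlier in this header, are reused directly, and the trigger ops
 * come from the sketch above.
 */
static int example_cmd_reg(char *glob, struct event_trigger_ops *ops,
			   struct event_trigger_data *data,
			   struct trace_event_file *file)
{
	int ret;

	ret = event_trigger_init(ops, data);	/* take a reference */
	if (ret < 0)
		return ret;

	list_add_rcu(&data->list, &file->triggers);
	update_cond_flag(file);

	/* make sure the event itself is enabled so the trigger can fire */
	return trace_event_trigger_enable_disable(file, 1);
}

static int example_cmd_parse(struct event_command *cmd_ops,
			     struct trace_event_file *file,
			     char *glob, char *cmd, char *param)
{
	struct event_trigger_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->count = -1;			/* "fire forever" */
	data->ops = cmd_ops->get_trigger_ops(cmd, param);
	data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&data->list);

	/* a real parser also handles '!' removal, counts and filters */
	return cmd_ops->reg(glob, data->ops, data, file);
}

static struct event_trigger_ops *
example_get_trigger_ops(char *cmd, char *param)
{
	return &example_trigger_ops;		/* from the sketch above */
}

static struct event_command example_cmd = {
	.name			= "example",
	.trigger_type		= ETT_TRACE_ONOFF,	/* shares an existing type bit */
	.func			= example_cmd_parse,
	.reg			= example_cmd_reg,
	.unreg			= unregister_trigger,
	.set_filter		= set_trigger_filter,
	.get_trigger_ops	= example_get_trigger_ops,
};

static __init int example_cmd_init(void)
{
	return register_event_command(&example_cmd);
}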
1766
353206f5
SRRH
1767/**
1768 * enum event_command_flags - flags for struct event_command
1769 *
1770 * @POST_TRIGGER: A flag that says whether or not this command needs
1771 * to have its action delayed until after the current event has
1772 * been closed. Some triggers need to avoid being invoked while
1773 * an event is currently in the process of being logged, since
1774 * the trigger may itself log data into the trace buffer. Thus
1775 * we make sure the current event is committed before invoking
1776 * those triggers. To do that, the trigger invocation is split
1777 * in two - the first part checks the filter using the current
1778 * trace record; if a command has the @post_trigger flag set, it
1779 * sets a bit for itself in the return value, otherwise it
1780 * directly invokes the trigger. Once all commands have either
1781 * been invoked or set their return flag, the current record is
1782 * either committed or discarded. At that point, if any commands
1783 * have deferred their triggers, those commands are finally
1784 * invoked following the close of the current event. In other
1785 * words, if the event_trigger_ops @func() probe implementation
1786 * itself logs to the trace buffer, this flag should be set,
1787 * otherwise it can be left unspecified.
1788 *
1789 * @NEEDS_REC: A flag that says whether or not this command needs
1790 * access to the trace record in order to perform its function,
1791 * regardless of whether or not it has a filter associated with
1792 * it (filters make a trigger require access to the trace record
1793 * but are not always present).
1794 */
1795enum event_command_flags {
1796 EVENT_CMD_FL_POST_TRIGGER = 1,
1797 EVENT_CMD_FL_NEEDS_REC = 2,
1798};
1799
1800static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1801{
1802 return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1803}
1804
1805static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1806{
1807 return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1808}
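/*
 * Illustrative sketch (hypothetical, simplified from the dispatch loop
 * in trace_events_trigger.c) of how these flags are consumed: a
 * command with the post-trigger flag only reports its type bit so the
 * caller can run it after the current record has been committed.
 */
static inline enum event_trigger_type
example_dispatch_one(struct event_trigger_data *data, void *rec)
{
	if (event_command_post_trigger(data->cmd_ops))
		return data->cmd_ops->trigger_type;	/* defer until commit */

	data->ops->func(data, rec);			/* safe to run now */
	return ETT_NONE;
}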
1809
7f1d2f82 1810extern int trace_event_enable_disable(struct trace_event_file *file,
85f2b082 1811 int enable, int soft_disable);
93e31ffb 1812extern int tracing_alloc_snapshot(void);
85f2b082 1813
e9fb2b6d
SR
1814extern const char *__start___trace_bprintk_fmt[];
1815extern const char *__stop___trace_bprintk_fmt[];
1816
102c9323
SRRH
1817extern const char *__start___tracepoint_str[];
1818extern const char *__stop___tracepoint_str[];
1819
b9f9108c 1820void trace_printk_control(bool enabled);
07d777fe 1821void trace_printk_init_buffers(void);
81698831 1822void trace_printk_start_comm(void);
613f04a0 1823int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
2b6080f2 1824int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
07d777fe 1825
7e465baa
TZ
1826#define MAX_EVENT_NAME_LEN 64
1827
1828extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
1829extern ssize_t trace_parse_run_command(struct file *file,
1830 const char __user *buffer, size_t count, loff_t *ppos,
1831 int (*createfn)(int, char**));
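/*
 * Illustrative sketch of a createfn callback as expected by
 * trace_run_command()/trace_parse_run_command() above (hypothetical
 * name; real users are e.g. the kprobe and synthetic event parsers).
 * The command line is split into whitespace-separated words before
 * the callback is invoked.
 */
static int example_createfn(int argc, char **argv)
{
	if (argc < 1)
		return -EINVAL;

	/* argv[0] is the command keyword, argv[1..] its parameters */
	if (strlen(argv[0]) >= MAX_EVENT_NAME_LEN)
		return -EINVAL;

	return 0;
}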
1832
ca268da6
SRRH
1833/*
1834 * Normal trace_printk() and friends allocate special buffers
1835 * to do the manipulation, and save the print formats
1836 * into sections to display. But the trace infrastructure wants
1837 * to use these without the added overhead, at the price of being
1838 * a bit slower (used mainly for warnings, where we don't care
1839 * about performance). internal_trace_puts() exists for that
1840 * purpose.
1841 */
1842#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
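/*
 * Example (hypothetical call site): emit a fixed warning string
 * straight into the ring buffer from within the tracing core itself:
 *
 *	if (WARN_ON_ONCE(!buffer_ok))
 *		internal_trace_puts("*** ring buffer unavailable ***\n");
 */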
1843
4e5292ea 1844#undef FTRACE_ENTRY
02aa3162 1845#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
2425bcb9 1846 extern struct trace_event_call \
52f5684c 1847 __aligned(4) event_##call;
4e5292ea 1848#undef FTRACE_ENTRY_DUP
02aa3162
JO
1849#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
1850 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1851 filter)
a4a551b8
NK
1852#undef FTRACE_ENTRY_PACKED
1853#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
1854 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1855 filter)
1856
4e5292ea 1857#include "trace_entries.h"
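/*
 * For illustration: with the redefinitions above, each
 * FTRACE_ENTRY(call, ...) in trace_entries.h now expands to
 *
 *	extern struct trace_event_call __aligned(4) event_<call>;
 *
 * so this second include only emits an extern trace_event_call
 * declaration per built-in entry type, instead of the structure
 * definitions generated on its first inclusion.
 */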
e1112b4d 1858
6e48b550 1859#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
2425bcb9 1860int perf_ftrace_event_register(struct trace_event_call *call,
ced39002
JO
1861 enum trace_reg type, void *data);
1862#else
1863#define perf_ftrace_event_register NULL
6e48b550 1864#endif
ced39002 1865
5f893b26
SRRH
1866#ifdef CONFIG_FTRACE_SYSCALLS
1867void init_ftrace_syscalls(void);
dbfeaa7a 1868const char *get_syscall_name(int syscall);
5f893b26
SRRH
1869#else
1870static inline void init_ftrace_syscalls(void) { }
dbfeaa7a
TZ
1871static inline const char *get_syscall_name(int syscall)
1872{
1873 return NULL;
1874}
5f893b26
SRRH
1875#endif
1876
1877#ifdef CONFIG_EVENT_TRACING
1878void trace_event_init(void);
f57a4143 1879void trace_event_eval_update(struct trace_eval_map **map, int len);
5f893b26
SRRH
1880#else
1881static inline void __init trace_event_init(void) { }
f57a4143 1882static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
5f893b26
SRRH
1883#endif
1884
9ccd9a81
SRV
1885#ifdef CONFIG_TRACER_SNAPSHOT
1886void tracing_snapshot_instance(struct trace_array *tr);
1887int tracing_alloc_snapshot_instance(struct trace_array *tr);
1888#else
1889static inline void tracing_snapshot_instance(struct trace_array *tr) { }
1890static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
1891{
1892 return 0;
1893}
1894#endif
1895
0daa2302 1896extern struct trace_iterator *tracepoint_print_iter;
5f893b26 1897
4d73c7fb
MO
1898/*
1899 * Reset the state of the trace_iterator so that it can be used to read
1900 * consumed data. Normally, the trace_iterator reads the data without
1901 * consuming it, and therefore must retain state between reads.
1902 */
1903static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
1904{
1905 const size_t offset = offsetof(struct trace_iterator, seq);
1906
1907 /*
1908 * Keep gcc from complaining about overwriting more than just one
1909 * member in the structure.
1910 */
1911 memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
1912
1913 iter->pos = -1;
1914}
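/*
 * Hypothetical usage sketch: a consuming reader resets the iterator at
 * the start of each read pass so stale output and positions from the
 * previous pass are dropped, then separately clears which CPUs have
 * already been reported on (that mask is not covered by the reset
 * above) before walking the buffers again.
 */
static inline void example_begin_read_pass(struct trace_iterator *iter)
{
	trace_iterator_reset(iter);	/* zeroes seq, ent, pos, ... */
	cpumask_clear(iter->started);
}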
1915
bc0c38d1 1916#endif /* _LINUX_KERNEL_TRACE_H */