]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blob - kernel/trace/trace.h
Merge tag 'arm-soc/for-5.5/maintainers-part2' of https://github.com/Broadcom/stblinux...
[mirror_ubuntu-hirsute-kernel.git] / kernel / trace / trace.h
1 // SPDX-License-Identifier: GPL-2.0
2
3 #ifndef _LINUX_KERNEL_TRACE_H
4 #define _LINUX_KERNEL_TRACE_H
5
6 #include <linux/fs.h>
7 #include <linux/atomic.h>
8 #include <linux/sched.h>
9 #include <linux/clocksource.h>
10 #include <linux/ring_buffer.h>
11 #include <linux/mmiotrace.h>
12 #include <linux/tracepoint.h>
13 #include <linux/ftrace.h>
14 #include <linux/hw_breakpoint.h>
15 #include <linux/trace_seq.h>
16 #include <linux/trace_events.h>
17 #include <linux/compiler.h>
18 #include <linux/glob.h>
19
20 #ifdef CONFIG_FTRACE_SYSCALLS
21 #include <asm/unistd.h> /* For NR_SYSCALLS */
22 #include <asm/syscall.h> /* some archs define it here */
23 #endif
24
25 enum trace_type {
26 __TRACE_FIRST_TYPE = 0,
27
28 TRACE_FN,
29 TRACE_CTX,
30 TRACE_WAKE,
31 TRACE_STACK,
32 TRACE_PRINT,
33 TRACE_BPRINT,
34 TRACE_MMIO_RW,
35 TRACE_MMIO_MAP,
36 TRACE_BRANCH,
37 TRACE_GRAPH_RET,
38 TRACE_GRAPH_ENT,
39 TRACE_USER_STACK,
40 TRACE_BLK,
41 TRACE_BPUTS,
42 TRACE_HWLAT,
43 TRACE_RAW_DATA,
44
45 __TRACE_LAST_TYPE,
46 };
47
48
49 #undef __field
50 #define __field(type, item) type item;
51
52 #undef __field_struct
53 #define __field_struct(type, item) __field(type, item)
54
55 #undef __field_desc
56 #define __field_desc(type, container, item)
57
58 #undef __array
59 #define __array(type, item, size) type item[size];
60
61 #undef __array_desc
62 #define __array_desc(type, container, item, size)
63
64 #undef __dynamic_array
65 #define __dynamic_array(type, item) type item[];
66
67 #undef F_STRUCT
68 #define F_STRUCT(args...) args
69
70 #undef FTRACE_ENTRY
71 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
72 struct struct_name { \
73 struct trace_entry ent; \
74 tstruct \
75 }
76
77 #undef FTRACE_ENTRY_DUP
78 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
79
80 #undef FTRACE_ENTRY_REG
81 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
82 filter, regfn) \
83 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
84 filter)
85
86 #undef FTRACE_ENTRY_PACKED
87 #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print, \
88 filter) \
89 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
90 filter) __packed
91
92 #include "trace_entries.h"
93
94 /*
95 * syscalls are special, and need special handling, this is why
96 * they are not included in trace_entries.h
97 */
98 struct syscall_trace_enter {
99 struct trace_entry ent;
100 int nr;
101 unsigned long args[];
102 };
103
104 struct syscall_trace_exit {
105 struct trace_entry ent;
106 int nr;
107 long ret;
108 };
109
110 struct kprobe_trace_entry_head {
111 struct trace_entry ent;
112 unsigned long ip;
113 };
114
115 struct kretprobe_trace_entry_head {
116 struct trace_entry ent;
117 unsigned long func;
118 unsigned long ret_ip;
119 };
120
121 /*
122 * trace_flag_type is an enumeration that holds different
123 * states when a trace occurs. These are:
124 * IRQS_OFF - interrupts were disabled
125 * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
126 * NEED_RESCHED - reschedule is requested
127 * HARDIRQ - inside an interrupt handler
128 * SOFTIRQ - inside a softirq handler
129 */
130 enum trace_flag_type {
131 TRACE_FLAG_IRQS_OFF = 0x01,
132 TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
133 TRACE_FLAG_NEED_RESCHED = 0x04,
134 TRACE_FLAG_HARDIRQ = 0x08,
135 TRACE_FLAG_SOFTIRQ = 0x10,
136 TRACE_FLAG_PREEMPT_RESCHED = 0x20,
137 TRACE_FLAG_NMI = 0x40,
138 };
139
140 #define TRACE_BUF_SIZE 1024
141
142 struct trace_array;
143
144 /*
145 * The CPU trace array - it consists of thousands of trace entries
146 * plus some other descriptor data: (for example which task started
147 * the trace, etc.)
148 */
149 struct trace_array_cpu {
150 atomic_t disabled;
151 void *buffer_page; /* ring buffer spare */
152
153 unsigned long entries;
154 unsigned long saved_latency;
155 unsigned long critical_start;
156 unsigned long critical_end;
157 unsigned long critical_sequence;
158 unsigned long nice;
159 unsigned long policy;
160 unsigned long rt_priority;
161 unsigned long skipped_entries;
162 u64 preempt_timestamp;
163 pid_t pid;
164 kuid_t uid;
165 char comm[TASK_COMM_LEN];
166
167 bool ignore_pid;
168 #ifdef CONFIG_FUNCTION_TRACER
169 bool ftrace_ignore_pid;
170 #endif
171 };
172
173 struct tracer;
174 struct trace_option_dentry;
175
176 struct trace_buffer {
177 struct trace_array *tr;
178 struct ring_buffer *buffer;
179 struct trace_array_cpu __percpu *data;
180 u64 time_start;
181 int cpu;
182 };
183
184 #define TRACE_FLAGS_MAX_SIZE 32
185
186 struct trace_options {
187 struct tracer *tracer;
188 struct trace_option_dentry *topts;
189 };
190
191 struct trace_pid_list {
192 int pid_max;
193 unsigned long *pids;
194 };
195
196 typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
197
198 /**
199 * struct cond_snapshot - conditional snapshot data and callback
200 *
201 * The cond_snapshot structure encapsulates a callback function and
202 * data associated with the snapshot for a given tracing instance.
203 *
204 * When a snapshot is taken conditionally, by invoking
205 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
206 * passed in turn to the cond_snapshot.update() function. That data
207 * can be compared by the update() implementation with the cond_data
208 * contained wihin the struct cond_snapshot instance associated with
209 * the trace_array. Because the tr->max_lock is held throughout the
210 * update() call, the update() function can directly retrieve the
211 * cond_snapshot and cond_data associated with the per-instance
212 * snapshot associated with the trace_array.
213 *
214 * The cond_snapshot.update() implementation can save data to be
215 * associated with the snapshot if it decides to, and returns 'true'
216 * in that case, or it returns 'false' if the conditional snapshot
217 * shouldn't be taken.
218 *
219 * The cond_snapshot instance is created and associated with the
220 * user-defined cond_data by tracing_cond_snapshot_enable().
221 * Likewise, the cond_snapshot instance is destroyed and is no longer
222 * associated with the trace instance by
223 * tracing_cond_snapshot_disable().
224 *
225 * The method below is required.
226 *
227 * @update: When a conditional snapshot is invoked, the update()
228 * callback function is invoked with the tr->max_lock held. The
229 * update() implementation signals whether or not to actually
230 * take the snapshot, by returning 'true' if so, 'false' if no
231 * snapshot should be taken. Because the max_lock is held for
232 * the duration of update(), the implementation is safe to
233 * directly retrieven and save any implementation data it needs
234 * to in association with the snapshot.
235 */
236 struct cond_snapshot {
237 void *cond_data;
238 cond_update_fn_t update;
239 };
240
241 /*
242 * The trace array - an array of per-CPU trace arrays. This is the
243 * highest level data structure that individual tracers deal with.
244 * They have on/off state as well:
245 */
246 struct trace_array {
247 struct list_head list;
248 char *name;
249 struct trace_buffer trace_buffer;
250 #ifdef CONFIG_TRACER_MAX_TRACE
251 /*
252 * The max_buffer is used to snapshot the trace when a maximum
253 * latency is reached, or when the user initiates a snapshot.
254 * Some tracers will use this to store a maximum trace while
255 * it continues examining live traces.
256 *
257 * The buffers for the max_buffer are set up the same as the trace_buffer
258 * When a snapshot is taken, the buffer of the max_buffer is swapped
259 * with the buffer of the trace_buffer and the buffers are reset for
260 * the trace_buffer so the tracing can continue.
261 */
262 struct trace_buffer max_buffer;
263 bool allocated_snapshot;
264 #endif
265 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
266 unsigned long max_latency;
267 #endif
268 struct trace_pid_list __rcu *filtered_pids;
269 /*
270 * max_lock is used to protect the swapping of buffers
271 * when taking a max snapshot. The buffers themselves are
272 * protected by per_cpu spinlocks. But the action of the swap
273 * needs its own lock.
274 *
275 * This is defined as a arch_spinlock_t in order to help
276 * with performance when lockdep debugging is enabled.
277 *
278 * It is also used in other places outside the update_max_tr
279 * so it needs to be defined outside of the
280 * CONFIG_TRACER_MAX_TRACE.
281 */
282 arch_spinlock_t max_lock;
283 int buffer_disabled;
284 #ifdef CONFIG_FTRACE_SYSCALLS
285 int sys_refcount_enter;
286 int sys_refcount_exit;
287 struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
288 struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
289 #endif
290 int stop_count;
291 int clock_id;
292 int nr_topts;
293 bool clear_trace;
294 int buffer_percent;
295 unsigned int n_err_log_entries;
296 struct tracer *current_trace;
297 unsigned int trace_flags;
298 unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
299 unsigned int flags;
300 raw_spinlock_t start_lock;
301 struct list_head err_log;
302 struct dentry *dir;
303 struct dentry *options;
304 struct dentry *percpu_dir;
305 struct dentry *event_dir;
306 struct trace_options *topts;
307 struct list_head systems;
308 struct list_head events;
309 struct trace_event_file *trace_marker_file;
310 cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
311 int ref;
312 #ifdef CONFIG_FUNCTION_TRACER
313 struct ftrace_ops *ops;
314 struct trace_pid_list __rcu *function_pids;
315 #ifdef CONFIG_DYNAMIC_FTRACE
316 /* All of these are protected by the ftrace_lock */
317 struct list_head func_probes;
318 struct list_head mod_trace;
319 struct list_head mod_notrace;
320 #endif
321 /* function tracing enabled */
322 int function_enabled;
323 #endif
324 int time_stamp_abs_ref;
325 struct list_head hist_vars;
326 #ifdef CONFIG_TRACER_SNAPSHOT
327 struct cond_snapshot *cond_snapshot;
328 #endif
329 };
330
331 enum {
332 TRACE_ARRAY_FL_GLOBAL = (1 << 0)
333 };
334
335 extern struct list_head ftrace_trace_arrays;
336
337 extern struct mutex trace_types_lock;
338
339 extern int trace_array_get(struct trace_array *tr);
340 extern void trace_array_put(struct trace_array *tr);
341 extern int tracing_check_open_get_tr(struct trace_array *tr);
342
343 extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
344 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
345
346 extern bool trace_clock_in_ns(struct trace_array *tr);
347
348 /*
349 * The global tracer (top) should be the first trace array added,
350 * but we check the flag anyway.
351 */
352 static inline struct trace_array *top_trace_array(void)
353 {
354 struct trace_array *tr;
355
356 if (list_empty(&ftrace_trace_arrays))
357 return NULL;
358
359 tr = list_entry(ftrace_trace_arrays.prev,
360 typeof(*tr), list);
361 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
362 return tr;
363 }
364
365 #define FTRACE_CMP_TYPE(var, type) \
366 __builtin_types_compatible_p(typeof(var), type *)
367
368 #undef IF_ASSIGN
369 #define IF_ASSIGN(var, entry, etype, id) \
370 if (FTRACE_CMP_TYPE(var, etype)) { \
371 var = (typeof(var))(entry); \
372 WARN_ON(id != 0 && (entry)->type != id); \
373 break; \
374 }
375
376 /* Will cause compile errors if type is not found. */
377 extern void __ftrace_bad_type(void);
378
379 /*
380 * The trace_assign_type is a verifier that the entry type is
381 * the same as the type being assigned. To add new types simply
382 * add a line with the following format:
383 *
384 * IF_ASSIGN(var, ent, type, id);
385 *
386 * Where "type" is the trace type that includes the trace_entry
387 * as the "ent" item. And "id" is the trace identifier that is
388 * used in the trace_type enum.
389 *
390 * If the type can have more than one id, then use zero.
391 */
392 #define trace_assign_type(var, ent) \
393 do { \
394 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
395 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
396 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
397 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
398 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
399 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
400 IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
401 IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \
402 IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
403 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
404 TRACE_MMIO_RW); \
405 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
406 TRACE_MMIO_MAP); \
407 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
408 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
409 TRACE_GRAPH_ENT); \
410 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
411 TRACE_GRAPH_RET); \
412 __ftrace_bad_type(); \
413 } while (0)
414
415 /*
416 * An option specific to a tracer. This is a boolean value.
417 * The bit is the bit index that sets its value on the
418 * flags value in struct tracer_flags.
419 */
420 struct tracer_opt {
421 const char *name; /* Will appear on the trace_options file */
422 u32 bit; /* Mask assigned in val field in tracer_flags */
423 };
424
425 /*
426 * The set of specific options for a tracer. Your tracer
427 * have to set the initial value of the flags val.
428 */
429 struct tracer_flags {
430 u32 val;
431 struct tracer_opt *opts;
432 struct tracer *trace;
433 };
434
435 /* Makes more easy to define a tracer opt */
436 #define TRACER_OPT(s, b) .name = #s, .bit = b
437
438
439 struct trace_option_dentry {
440 struct tracer_opt *opt;
441 struct tracer_flags *flags;
442 struct trace_array *tr;
443 struct dentry *entry;
444 };
445
446 /**
447 * struct tracer - a specific tracer and its callbacks to interact with tracefs
448 * @name: the name chosen to select it on the available_tracers file
449 * @init: called when one switches to this tracer (echo name > current_tracer)
450 * @reset: called when one switches to another tracer
451 * @start: called when tracing is unpaused (echo 1 > tracing_on)
452 * @stop: called when tracing is paused (echo 0 > tracing_on)
453 * @update_thresh: called when tracing_thresh is updated
454 * @open: called when the trace file is opened
455 * @pipe_open: called when the trace_pipe file is opened
456 * @close: called when the trace file is released
457 * @pipe_close: called when the trace_pipe file is released
458 * @read: override the default read callback on trace_pipe
459 * @splice_read: override the default splice_read callback on trace_pipe
460 * @selftest: selftest to run on boot (see trace_selftest.c)
461 * @print_headers: override the first lines that describe your columns
462 * @print_line: callback that prints a trace
463 * @set_flag: signals one of your private flags changed (trace_options file)
464 * @flags: your private flags
465 */
466 struct tracer {
467 const char *name;
468 int (*init)(struct trace_array *tr);
469 void (*reset)(struct trace_array *tr);
470 void (*start)(struct trace_array *tr);
471 void (*stop)(struct trace_array *tr);
472 int (*update_thresh)(struct trace_array *tr);
473 void (*open)(struct trace_iterator *iter);
474 void (*pipe_open)(struct trace_iterator *iter);
475 void (*close)(struct trace_iterator *iter);
476 void (*pipe_close)(struct trace_iterator *iter);
477 ssize_t (*read)(struct trace_iterator *iter,
478 struct file *filp, char __user *ubuf,
479 size_t cnt, loff_t *ppos);
480 ssize_t (*splice_read)(struct trace_iterator *iter,
481 struct file *filp,
482 loff_t *ppos,
483 struct pipe_inode_info *pipe,
484 size_t len,
485 unsigned int flags);
486 #ifdef CONFIG_FTRACE_STARTUP_TEST
487 int (*selftest)(struct tracer *trace,
488 struct trace_array *tr);
489 #endif
490 void (*print_header)(struct seq_file *m);
491 enum print_line_t (*print_line)(struct trace_iterator *iter);
492 /* If you handled the flag setting, return 0 */
493 int (*set_flag)(struct trace_array *tr,
494 u32 old_flags, u32 bit, int set);
495 /* Return 0 if OK with change, else return non-zero */
496 int (*flag_changed)(struct trace_array *tr,
497 u32 mask, int set);
498 struct tracer *next;
499 struct tracer_flags *flags;
500 int enabled;
501 int ref;
502 bool print_max;
503 bool allow_instances;
504 #ifdef CONFIG_TRACER_MAX_TRACE
505 bool use_max_tr;
506 #endif
507 /* True if tracer cannot be enabled in kernel param */
508 bool noboot;
509 };
510
511
512 /* Only current can touch trace_recursion */
513
514 /*
515 * For function tracing recursion:
516 * The order of these bits are important.
517 *
518 * When function tracing occurs, the following steps are made:
519 * If arch does not support a ftrace feature:
520 * call internal function (uses INTERNAL bits) which calls...
521 * If callback is registered to the "global" list, the list
522 * function is called and recursion checks the GLOBAL bits.
523 * then this function calls...
524 * The function callback, which can use the FTRACE bits to
525 * check for recursion.
526 *
527 * Now if the arch does not suppport a feature, and it calls
528 * the global list function which calls the ftrace callback
529 * all three of these steps will do a recursion protection.
530 * There's no reason to do one if the previous caller already
531 * did. The recursion that we are protecting against will
532 * go through the same steps again.
533 *
534 * To prevent the multiple recursion checks, if a recursion
535 * bit is set that is higher than the MAX bit of the current
536 * check, then we know that the check was made by the previous
537 * caller, and we can skip the current check.
538 */
539 enum {
540 TRACE_BUFFER_BIT,
541 TRACE_BUFFER_NMI_BIT,
542 TRACE_BUFFER_IRQ_BIT,
543 TRACE_BUFFER_SIRQ_BIT,
544
545 /* Start of function recursion bits */
546 TRACE_FTRACE_BIT,
547 TRACE_FTRACE_NMI_BIT,
548 TRACE_FTRACE_IRQ_BIT,
549 TRACE_FTRACE_SIRQ_BIT,
550
551 /* INTERNAL_BITs must be greater than FTRACE_BITs */
552 TRACE_INTERNAL_BIT,
553 TRACE_INTERNAL_NMI_BIT,
554 TRACE_INTERNAL_IRQ_BIT,
555 TRACE_INTERNAL_SIRQ_BIT,
556
557 TRACE_BRANCH_BIT,
558 /*
559 * Abuse of the trace_recursion.
560 * As we need a way to maintain state if we are tracing the function
561 * graph in irq because we want to trace a particular function that
562 * was called in irq context but we have irq tracing off. Since this
563 * can only be modified by current, we can reuse trace_recursion.
564 */
565 TRACE_IRQ_BIT,
566
567 /* Set if the function is in the set_graph_function file */
568 TRACE_GRAPH_BIT,
569
570 /*
571 * In the very unlikely case that an interrupt came in
572 * at a start of graph tracing, and we want to trace
573 * the function in that interrupt, the depth can be greater
574 * than zero, because of the preempted start of a previous
575 * trace. In an even more unlikely case, depth could be 2
576 * if a softirq interrupted the start of graph tracing,
577 * followed by an interrupt preempting a start of graph
578 * tracing in the softirq, and depth can even be 3
579 * if an NMI came in at the start of an interrupt function
580 * that preempted a softirq start of a function that
581 * preempted normal context!!!! Luckily, it can't be
582 * greater than 3, so the next two bits are a mask
583 * of what the depth is when we set TRACE_GRAPH_BIT
584 */
585
586 TRACE_GRAPH_DEPTH_START_BIT,
587 TRACE_GRAPH_DEPTH_END_BIT,
588
589 /*
590 * To implement set_graph_notrace, if this bit is set, we ignore
591 * function graph tracing of called functions, until the return
592 * function is called to clear it.
593 */
594 TRACE_GRAPH_NOTRACE_BIT,
595 };
596
597 #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
598 #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
599 #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
600
601 #define trace_recursion_depth() \
602 (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
603 #define trace_recursion_set_depth(depth) \
604 do { \
605 current->trace_recursion &= \
606 ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
607 current->trace_recursion |= \
608 ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
609 } while (0)
610
611 #define TRACE_CONTEXT_BITS 4
612
613 #define TRACE_FTRACE_START TRACE_FTRACE_BIT
614 #define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
615
616 #define TRACE_LIST_START TRACE_INTERNAL_BIT
617 #define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
618
619 #define TRACE_CONTEXT_MASK TRACE_LIST_MAX
620
621 static __always_inline int trace_get_context_bit(void)
622 {
623 int bit;
624
625 if (in_interrupt()) {
626 if (in_nmi())
627 bit = 0;
628
629 else if (in_irq())
630 bit = 1;
631 else
632 bit = 2;
633 } else
634 bit = 3;
635
636 return bit;
637 }
638
639 static __always_inline int trace_test_and_set_recursion(int start, int max)
640 {
641 unsigned int val = current->trace_recursion;
642 int bit;
643
644 /* A previous recursion check was made */
645 if ((val & TRACE_CONTEXT_MASK) > max)
646 return 0;
647
648 bit = trace_get_context_bit() + start;
649 if (unlikely(val & (1 << bit)))
650 return -1;
651
652 val |= 1 << bit;
653 current->trace_recursion = val;
654 barrier();
655
656 return bit;
657 }
658
659 static __always_inline void trace_clear_recursion(int bit)
660 {
661 unsigned int val = current->trace_recursion;
662
663 if (!bit)
664 return;
665
666 bit = 1 << bit;
667 val &= ~bit;
668
669 barrier();
670 current->trace_recursion = val;
671 }
672
673 static inline struct ring_buffer_iter *
674 trace_buffer_iter(struct trace_iterator *iter, int cpu)
675 {
676 return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
677 }
678
679 int tracer_init(struct tracer *t, struct trace_array *tr);
680 int tracing_is_enabled(void);
681 void tracing_reset_online_cpus(struct trace_buffer *buf);
682 void tracing_reset_current(int cpu);
683 void tracing_reset_all_online_cpus(void);
684 int tracing_open_generic(struct inode *inode, struct file *filp);
685 int tracing_open_generic_tr(struct inode *inode, struct file *filp);
686 bool tracing_is_disabled(void);
687 bool tracer_tracing_is_on(struct trace_array *tr);
688 void tracer_tracing_on(struct trace_array *tr);
689 void tracer_tracing_off(struct trace_array *tr);
690 struct dentry *trace_create_file(const char *name,
691 umode_t mode,
692 struct dentry *parent,
693 void *data,
694 const struct file_operations *fops);
695
696 struct dentry *tracing_init_dentry(void);
697
698 struct ring_buffer_event;
699
700 struct ring_buffer_event *
701 trace_buffer_lock_reserve(struct ring_buffer *buffer,
702 int type,
703 unsigned long len,
704 unsigned long flags,
705 int pc);
706
707 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
708 struct trace_array_cpu *data);
709
710 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
711 int *ent_cpu, u64 *ent_ts);
712
713 void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
714 struct ring_buffer_event *event);
715
716 int trace_empty(struct trace_iterator *iter);
717
718 void *trace_find_next_entry_inc(struct trace_iterator *iter);
719
720 void trace_init_global_iter(struct trace_iterator *iter);
721
722 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
723
724 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
725 unsigned long trace_total_entries(struct trace_array *tr);
726
727 void trace_function(struct trace_array *tr,
728 unsigned long ip,
729 unsigned long parent_ip,
730 unsigned long flags, int pc);
731 void trace_graph_function(struct trace_array *tr,
732 unsigned long ip,
733 unsigned long parent_ip,
734 unsigned long flags, int pc);
735 void trace_latency_header(struct seq_file *m);
736 void trace_default_header(struct seq_file *m);
737 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
738 int trace_empty(struct trace_iterator *iter);
739
740 void trace_graph_return(struct ftrace_graph_ret *trace);
741 int trace_graph_entry(struct ftrace_graph_ent *trace);
742 void set_graph_array(struct trace_array *tr);
743
744 void tracing_start_cmdline_record(void);
745 void tracing_stop_cmdline_record(void);
746 void tracing_start_tgid_record(void);
747 void tracing_stop_tgid_record(void);
748
749 int register_tracer(struct tracer *type);
750 int is_tracing_stopped(void);
751
752 loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
753
754 extern cpumask_var_t __read_mostly tracing_buffer_mask;
755
756 #define for_each_tracing_cpu(cpu) \
757 for_each_cpu(cpu, tracing_buffer_mask)
758
759 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
760
761 extern unsigned long tracing_thresh;
762
763 /* PID filtering */
764
765 extern int pid_max;
766
767 bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
768 pid_t search_pid);
769 bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
770 struct task_struct *task);
771 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
772 struct task_struct *self,
773 struct task_struct *task);
774 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
775 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
776 int trace_pid_show(struct seq_file *m, void *v);
777 void trace_free_pid_list(struct trace_pid_list *pid_list);
778 int trace_pid_write(struct trace_pid_list *filtered_pids,
779 struct trace_pid_list **new_pid_list,
780 const char __user *ubuf, size_t cnt);
781
782 #ifdef CONFIG_TRACER_MAX_TRACE
783 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
784 void *cond_data);
785 void update_max_tr_single(struct trace_array *tr,
786 struct task_struct *tsk, int cpu);
787 #endif /* CONFIG_TRACER_MAX_TRACE */
788
789 #ifdef CONFIG_STACKTRACE
790 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
791 int pc);
792 #else
793 static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
794 int skip, int pc)
795 {
796 }
797 #endif /* CONFIG_STACKTRACE */
798
799 extern u64 ftrace_now(int cpu);
800
801 extern void trace_find_cmdline(int pid, char comm[]);
802 extern int trace_find_tgid(int pid);
803 extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
804
805 #ifdef CONFIG_DYNAMIC_FTRACE
806 extern unsigned long ftrace_update_tot_cnt;
807 void ftrace_init_trace_array(struct trace_array *tr);
808 #else
809 static inline void ftrace_init_trace_array(struct trace_array *tr) { }
810 #endif
811 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
812 extern int DYN_FTRACE_TEST_NAME(void);
813 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
814 extern int DYN_FTRACE_TEST_NAME2(void);
815
816 extern bool ring_buffer_expanded;
817 extern bool tracing_selftest_disabled;
818
819 #ifdef CONFIG_FTRACE_STARTUP_TEST
820 extern int trace_selftest_startup_function(struct tracer *trace,
821 struct trace_array *tr);
822 extern int trace_selftest_startup_function_graph(struct tracer *trace,
823 struct trace_array *tr);
824 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
825 struct trace_array *tr);
826 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
827 struct trace_array *tr);
828 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
829 struct trace_array *tr);
830 extern int trace_selftest_startup_wakeup(struct tracer *trace,
831 struct trace_array *tr);
832 extern int trace_selftest_startup_nop(struct tracer *trace,
833 struct trace_array *tr);
834 extern int trace_selftest_startup_branch(struct tracer *trace,
835 struct trace_array *tr);
836 /*
837 * Tracer data references selftest functions that only occur
838 * on boot up. These can be __init functions. Thus, when selftests
839 * are enabled, then the tracers need to reference __init functions.
840 */
841 #define __tracer_data __refdata
842 #else
843 /* Tracers are seldom changed. Optimize when selftests are disabled. */
844 #define __tracer_data __read_mostly
845 #endif /* CONFIG_FTRACE_STARTUP_TEST */
846
847 extern void *head_page(struct trace_array_cpu *data);
848 extern unsigned long long ns2usecs(u64 nsec);
849 extern int
850 trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
851 extern int
852 trace_vprintk(unsigned long ip, const char *fmt, va_list args);
853 extern int
854 trace_array_vprintk(struct trace_array *tr,
855 unsigned long ip, const char *fmt, va_list args);
856 int trace_array_printk(struct trace_array *tr,
857 unsigned long ip, const char *fmt, ...);
858 int trace_array_printk_buf(struct ring_buffer *buffer,
859 unsigned long ip, const char *fmt, ...);
860 void trace_printk_seq(struct trace_seq *s);
861 enum print_line_t print_trace_line(struct trace_iterator *iter);
862
863 extern char trace_find_mark(unsigned long long duration);
864
865 struct ftrace_hash;
866
867 struct ftrace_mod_load {
868 struct list_head list;
869 char *func;
870 char *module;
871 int enable;
872 };
873
874 enum {
875 FTRACE_HASH_FL_MOD = (1 << 0),
876 };
877
878 struct ftrace_hash {
879 unsigned long size_bits;
880 struct hlist_head *buckets;
881 unsigned long count;
882 unsigned long flags;
883 struct rcu_head rcu;
884 };
885
886 struct ftrace_func_entry *
887 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
888
889 static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
890 {
891 return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
892 }
893
894 /* Standard output formatting function used for function return traces */
895 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
896
897 /* Flag options */
898 #define TRACE_GRAPH_PRINT_OVERRUN 0x1
899 #define TRACE_GRAPH_PRINT_CPU 0x2
900 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
901 #define TRACE_GRAPH_PRINT_PROC 0x8
902 #define TRACE_GRAPH_PRINT_DURATION 0x10
903 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
904 #define TRACE_GRAPH_PRINT_REL_TIME 0x40
905 #define TRACE_GRAPH_PRINT_IRQS 0x80
906 #define TRACE_GRAPH_PRINT_TAIL 0x100
907 #define TRACE_GRAPH_SLEEP_TIME 0x200
908 #define TRACE_GRAPH_GRAPH_TIME 0x400
909 #define TRACE_GRAPH_PRINT_FILL_SHIFT 28
910 #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
911
912 extern void ftrace_graph_sleep_time_control(bool enable);
913
914 #ifdef CONFIG_FUNCTION_PROFILER
915 extern void ftrace_graph_graph_time_control(bool enable);
916 #else
917 static inline void ftrace_graph_graph_time_control(bool enable) { }
918 #endif
919
920 extern enum print_line_t
921 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
922 extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
923 extern void
924 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
925 extern void graph_trace_open(struct trace_iterator *iter);
926 extern void graph_trace_close(struct trace_iterator *iter);
927 extern int __trace_graph_entry(struct trace_array *tr,
928 struct ftrace_graph_ent *trace,
929 unsigned long flags, int pc);
930 extern void __trace_graph_return(struct trace_array *tr,
931 struct ftrace_graph_ret *trace,
932 unsigned long flags, int pc);
933
934 #ifdef CONFIG_DYNAMIC_FTRACE
935 extern struct ftrace_hash *ftrace_graph_hash;
936 extern struct ftrace_hash *ftrace_graph_notrace_hash;
937
938 static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
939 {
940 unsigned long addr = trace->func;
941 int ret = 0;
942
943 preempt_disable_notrace();
944
945 if (ftrace_hash_empty(ftrace_graph_hash)) {
946 ret = 1;
947 goto out;
948 }
949
950 if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
951
952 /*
953 * This needs to be cleared on the return functions
954 * when the depth is zero.
955 */
956 trace_recursion_set(TRACE_GRAPH_BIT);
957 trace_recursion_set_depth(trace->depth);
958
959 /*
960 * If no irqs are to be traced, but a set_graph_function
961 * is set, and called by an interrupt handler, we still
962 * want to trace it.
963 */
964 if (in_irq())
965 trace_recursion_set(TRACE_IRQ_BIT);
966 else
967 trace_recursion_clear(TRACE_IRQ_BIT);
968 ret = 1;
969 }
970
971 out:
972 preempt_enable_notrace();
973 return ret;
974 }
975
976 static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
977 {
978 if (trace_recursion_test(TRACE_GRAPH_BIT) &&
979 trace->depth == trace_recursion_depth())
980 trace_recursion_clear(TRACE_GRAPH_BIT);
981 }
982
983 static inline int ftrace_graph_notrace_addr(unsigned long addr)
984 {
985 int ret = 0;
986
987 preempt_disable_notrace();
988
989 if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
990 ret = 1;
991
992 preempt_enable_notrace();
993 return ret;
994 }
995 #else
996 static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
997 {
998 return 1;
999 }
1000
1001 static inline int ftrace_graph_notrace_addr(unsigned long addr)
1002 {
1003 return 0;
1004 }
1005 static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
1006 { }
1007 #endif /* CONFIG_DYNAMIC_FTRACE */
1008
1009 extern unsigned int fgraph_max_depth;
1010
1011 static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
1012 {
1013 /* trace it when it is-nested-in or is a function enabled. */
1014 return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
1015 ftrace_graph_addr(trace)) ||
1016 (trace->depth < 0) ||
1017 (fgraph_max_depth && trace->depth >= fgraph_max_depth);
1018 }
1019
1020 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
1021 static inline enum print_line_t
1022 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1023 {
1024 return TRACE_TYPE_UNHANDLED;
1025 }
1026 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1027
1028 extern struct list_head ftrace_pids;
1029
1030 #ifdef CONFIG_FUNCTION_TRACER
1031 struct ftrace_func_command {
1032 struct list_head list;
1033 char *name;
1034 int (*func)(struct trace_array *tr,
1035 struct ftrace_hash *hash,
1036 char *func, char *cmd,
1037 char *params, int enable);
1038 };
1039 extern bool ftrace_filter_param __initdata;
1040 static inline int ftrace_trace_task(struct trace_array *tr)
1041 {
1042 return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
1043 }
1044 extern int ftrace_is_dead(void);
1045 int ftrace_create_function_files(struct trace_array *tr,
1046 struct dentry *parent);
1047 void ftrace_destroy_function_files(struct trace_array *tr);
1048 void ftrace_init_global_array_ops(struct trace_array *tr);
1049 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
1050 void ftrace_reset_array_ops(struct trace_array *tr);
1051 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
1052 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
1053 struct dentry *d_tracer);
1054 void ftrace_clear_pids(struct trace_array *tr);
1055 int init_function_trace(void);
1056 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
1057 #else
1058 static inline int ftrace_trace_task(struct trace_array *tr)
1059 {
1060 return 1;
1061 }
1062 static inline int ftrace_is_dead(void) { return 0; }
1063 static inline int
1064 ftrace_create_function_files(struct trace_array *tr,
1065 struct dentry *parent)
1066 {
1067 return 0;
1068 }
1069 static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
1070 static inline __init void
1071 ftrace_init_global_array_ops(struct trace_array *tr) { }
1072 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
1073 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
1074 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
1075 static inline void ftrace_clear_pids(struct trace_array *tr) { }
1076 static inline int init_function_trace(void) { return 0; }
1077 static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
1078 /* ftace_func_t type is not defined, use macro instead of static inline */
1079 #define ftrace_init_array_ops(tr, func) do { } while (0)
1080 #endif /* CONFIG_FUNCTION_TRACER */
1081
1082 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
1083
1084 struct ftrace_probe_ops {
1085 void (*func)(unsigned long ip,
1086 unsigned long parent_ip,
1087 struct trace_array *tr,
1088 struct ftrace_probe_ops *ops,
1089 void *data);
1090 int (*init)(struct ftrace_probe_ops *ops,
1091 struct trace_array *tr,
1092 unsigned long ip, void *init_data,
1093 void **data);
1094 void (*free)(struct ftrace_probe_ops *ops,
1095 struct trace_array *tr,
1096 unsigned long ip, void *data);
1097 int (*print)(struct seq_file *m,
1098 unsigned long ip,
1099 struct ftrace_probe_ops *ops,
1100 void *data);
1101 };
1102
1103 struct ftrace_func_mapper;
1104 typedef int (*ftrace_mapper_func)(void *data);
1105
1106 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
1107 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
1108 unsigned long ip);
1109 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
1110 unsigned long ip, void *data);
1111 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
1112 unsigned long ip);
1113 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
1114 ftrace_mapper_func free_func);
1115
1116 extern int
1117 register_ftrace_function_probe(char *glob, struct trace_array *tr,
1118 struct ftrace_probe_ops *ops, void *data);
1119 extern int
1120 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
1121 struct ftrace_probe_ops *ops);
1122 extern void clear_ftrace_function_probes(struct trace_array *tr);
1123
1124 int register_ftrace_command(struct ftrace_func_command *cmd);
1125 int unregister_ftrace_command(struct ftrace_func_command *cmd);
1126
1127 void ftrace_create_filter_files(struct ftrace_ops *ops,
1128 struct dentry *parent);
1129 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1130 #else
1131 struct ftrace_func_command;
1132
1133 static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
1134 {
1135 return -EINVAL;
1136 }
1137 static inline __init int unregister_ftrace_command(char *cmd_name)
1138 {
1139 return -EINVAL;
1140 }
1141 static inline void clear_ftrace_function_probes(struct trace_array *tr)
1142 {
1143 }
1144
1145 /*
1146 * The ops parameter passed in is usually undefined.
1147 * This must be a macro.
1148 */
1149 #define ftrace_create_filter_files(ops, parent) do { } while (0)
1150 #define ftrace_destroy_filter_files(ops) do { } while (0)
1151 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
1152
1153 bool ftrace_event_is_function(struct trace_event_call *call);
1154
1155 /*
1156 * struct trace_parser - servers for reading the user input separated by spaces
1157 * @cont: set if the input is not complete - no final space char was found
1158 * @buffer: holds the parsed user input
1159 * @idx: user input length
1160 * @size: buffer size
1161 */
1162 struct trace_parser {
1163 bool cont;
1164 char *buffer;
1165 unsigned idx;
1166 unsigned size;
1167 };
1168
1169 static inline bool trace_parser_loaded(struct trace_parser *parser)
1170 {
1171 return (parser->idx != 0);
1172 }
1173
1174 static inline bool trace_parser_cont(struct trace_parser *parser)
1175 {
1176 return parser->cont;
1177 }
1178
1179 static inline void trace_parser_clear(struct trace_parser *parser)
1180 {
1181 parser->cont = false;
1182 parser->idx = 0;
1183 }
1184
1185 extern int trace_parser_get_init(struct trace_parser *parser, int size);
1186 extern void trace_parser_put(struct trace_parser *parser);
1187 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1188 size_t cnt, loff_t *ppos);
1189
1190 /*
1191 * Only create function graph options if function graph is configured.
1192 */
1193 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1194 # define FGRAPH_FLAGS \
1195 C(DISPLAY_GRAPH, "display-graph"),
1196 #else
1197 # define FGRAPH_FLAGS
1198 #endif
1199
1200 #ifdef CONFIG_BRANCH_TRACER
1201 # define BRANCH_FLAGS \
1202 C(BRANCH, "branch"),
1203 #else
1204 # define BRANCH_FLAGS
1205 #endif
1206
1207 #ifdef CONFIG_FUNCTION_TRACER
1208 # define FUNCTION_FLAGS \
1209 C(FUNCTION, "function-trace"), \
1210 C(FUNC_FORK, "function-fork"),
1211 # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
1212 #else
1213 # define FUNCTION_FLAGS
1214 # define FUNCTION_DEFAULT_FLAGS 0UL
1215 # define TRACE_ITER_FUNC_FORK 0UL
1216 #endif
1217
1218 #ifdef CONFIG_STACKTRACE
1219 # define STACK_FLAGS \
1220 C(STACKTRACE, "stacktrace"),
1221 #else
1222 # define STACK_FLAGS
1223 #endif
1224
1225 /*
1226 * trace_iterator_flags is an enumeration that defines bit
1227 * positions into trace_flags that controls the output.
1228 *
1229 * NOTE: These bits must match the trace_options array in
1230 * trace.c (this macro guarantees it).
1231 */
1232 #define TRACE_FLAGS \
1233 C(PRINT_PARENT, "print-parent"), \
1234 C(SYM_OFFSET, "sym-offset"), \
1235 C(SYM_ADDR, "sym-addr"), \
1236 C(VERBOSE, "verbose"), \
1237 C(RAW, "raw"), \
1238 C(HEX, "hex"), \
1239 C(BIN, "bin"), \
1240 C(BLOCK, "block"), \
1241 C(PRINTK, "trace_printk"), \
1242 C(ANNOTATE, "annotate"), \
1243 C(USERSTACKTRACE, "userstacktrace"), \
1244 C(SYM_USEROBJ, "sym-userobj"), \
1245 C(PRINTK_MSGONLY, "printk-msg-only"), \
1246 C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \
1247 C(LATENCY_FMT, "latency-format"), \
1248 C(RECORD_CMD, "record-cmd"), \
1249 C(RECORD_TGID, "record-tgid"), \
1250 C(OVERWRITE, "overwrite"), \
1251 C(STOP_ON_FREE, "disable_on_free"), \
1252 C(IRQ_INFO, "irq-info"), \
1253 C(MARKERS, "markers"), \
1254 C(EVENT_FORK, "event-fork"), \
1255 FUNCTION_FLAGS \
1256 FGRAPH_FLAGS \
1257 STACK_FLAGS \
1258 BRANCH_FLAGS
1259
1260 /*
1261 * By defining C, we can make TRACE_FLAGS a list of bit names
1262 * that will define the bits for the flag masks.
1263 */
1264 #undef C
1265 #define C(a, b) TRACE_ITER_##a##_BIT
1266
1267 enum trace_iterator_bits {
1268 TRACE_FLAGS
1269 /* Make sure we don't go more than we have bits for */
1270 TRACE_ITER_LAST_BIT
1271 };
1272
1273 /*
1274 * By redefining C, we can make TRACE_FLAGS a list of masks that
1275 * use the bits as defined above.
1276 */
1277 #undef C
1278 #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
1279
1280 enum trace_iterator_flags { TRACE_FLAGS };
1281
1282 /*
1283 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1284 * control the output of kernel symbols.
1285 */
1286 #define TRACE_ITER_SYM_MASK \
1287 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1288
1289 extern struct tracer nop_trace;
1290
1291 #ifdef CONFIG_BRANCH_TRACER
1292 extern int enable_branch_tracing(struct trace_array *tr);
1293 extern void disable_branch_tracing(void);
1294 static inline int trace_branch_enable(struct trace_array *tr)
1295 {
1296 if (tr->trace_flags & TRACE_ITER_BRANCH)
1297 return enable_branch_tracing(tr);
1298 return 0;
1299 }
1300 static inline void trace_branch_disable(void)
1301 {
1302 /* due to races, always disable */
1303 disable_branch_tracing();
1304 }
1305 #else
1306 static inline int trace_branch_enable(struct trace_array *tr)
1307 {
1308 return 0;
1309 }
1310 static inline void trace_branch_disable(void)
1311 {
1312 }
1313 #endif /* CONFIG_BRANCH_TRACER */
1314
1315 /* set ring buffers to default size if not already done so */
1316 int tracing_update_buffers(void);
1317
1318 struct ftrace_event_field {
1319 struct list_head link;
1320 const char *name;
1321 const char *type;
1322 int filter_type;
1323 int offset;
1324 int size;
1325 int is_signed;
1326 };
1327
1328 struct prog_entry;
1329
1330 struct event_filter {
1331 struct prog_entry __rcu *prog;
1332 char *filter_string;
1333 };
1334
1335 struct event_subsystem {
1336 struct list_head list;
1337 const char *name;
1338 struct event_filter *filter;
1339 int ref_count;
1340 };
1341
1342 struct trace_subsystem_dir {
1343 struct list_head list;
1344 struct event_subsystem *subsystem;
1345 struct trace_array *tr;
1346 struct dentry *entry;
1347 int ref_count;
1348 int nr_events;
1349 };
1350
1351 extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1352 struct ring_buffer *buffer,
1353 struct ring_buffer_event *event);
1354
1355 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1356 struct ring_buffer *buffer,
1357 struct ring_buffer_event *event,
1358 unsigned long flags, int pc,
1359 struct pt_regs *regs);
1360
1361 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
1362 struct ring_buffer *buffer,
1363 struct ring_buffer_event *event,
1364 unsigned long flags, int pc)
1365 {
1366 trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
1367 }
1368
1369 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1370 DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1371 void trace_buffered_event_disable(void);
1372 void trace_buffered_event_enable(void);
1373
1374 static inline void
1375 __trace_event_discard_commit(struct ring_buffer *buffer,
1376 struct ring_buffer_event *event)
1377 {
1378 if (this_cpu_read(trace_buffered_event) == event) {
1379 /* Simply release the temp buffer */
1380 this_cpu_dec(trace_buffered_event_cnt);
1381 return;
1382 }
1383 ring_buffer_discard_commit(buffer, event);
1384 }
1385
1386 /*
1387 * Helper function for event_trigger_unlock_commit{_regs}().
1388 * If there are event triggers attached to this event that requires
1389 * filtering against its fields, then they wil be called as the
1390 * entry already holds the field information of the current event.
1391 *
1392 * It also checks if the event should be discarded or not.
1393 * It is to be discarded if the event is soft disabled and the
1394 * event was only recorded to process triggers, or if the event
1395 * filter is active and this event did not match the filters.
1396 *
1397 * Returns true if the event is discarded, false otherwise.
1398 */
1399 static inline bool
1400 __event_trigger_test_discard(struct trace_event_file *file,
1401 struct ring_buffer *buffer,
1402 struct ring_buffer_event *event,
1403 void *entry,
1404 enum event_trigger_type *tt)
1405 {
1406 unsigned long eflags = file->flags;
1407
1408 if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1409 *tt = event_triggers_call(file, entry, event);
1410
1411 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
1412 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
1413 !filter_match_preds(file->filter, entry))) {
1414 __trace_event_discard_commit(buffer, event);
1415 return true;
1416 }
1417
1418 return false;
1419 }
1420
1421 /**
1422 * event_trigger_unlock_commit - handle triggers and finish event commit
1423 * @file: The file pointer assoctiated to the event
1424 * @buffer: The ring buffer that the event is being written to
1425 * @event: The event meta data in the ring buffer
1426 * @entry: The event itself
1427 * @irq_flags: The state of the interrupts at the start of the event
1428 * @pc: The state of the preempt count at the start of the event.
1429 *
1430 * This is a helper function to handle triggers that require data
1431 * from the event itself. It also tests the event against filters and
1432 * if the event is soft disabled and should be discarded.
1433 */
1434 static inline void
1435 event_trigger_unlock_commit(struct trace_event_file *file,
1436 struct ring_buffer *buffer,
1437 struct ring_buffer_event *event,
1438 void *entry, unsigned long irq_flags, int pc)
1439 {
1440 enum event_trigger_type tt = ETT_NONE;
1441
1442 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1443 trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
1444
1445 if (tt)
1446 event_triggers_post_call(file, tt);
1447 }
1448
1449 /**
1450 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
1451 * @file: The file pointer assoctiated to the event
1452 * @buffer: The ring buffer that the event is being written to
1453 * @event: The event meta data in the ring buffer
1454 * @entry: The event itself
1455 * @irq_flags: The state of the interrupts at the start of the event
1456 * @pc: The state of the preempt count at the start of the event.
1457 *
1458 * This is a helper function to handle triggers that require data
1459 * from the event itself. It also tests the event against filters and
1460 * if the event is soft disabled and should be discarded.
1461 *
1462 * Same as event_trigger_unlock_commit() but calls
1463 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
1464 */
1465 static inline void
1466 event_trigger_unlock_commit_regs(struct trace_event_file *file,
1467 struct ring_buffer *buffer,
1468 struct ring_buffer_event *event,
1469 void *entry, unsigned long irq_flags, int pc,
1470 struct pt_regs *regs)
1471 {
1472 enum event_trigger_type tt = ETT_NONE;
1473
1474 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1475 trace_buffer_unlock_commit_regs(file->tr, buffer, event,
1476 irq_flags, pc, regs);
1477
1478 if (tt)
1479 event_triggers_post_call(file, tt);
1480 }
1481
1482 #define FILTER_PRED_INVALID ((unsigned short)-1)
1483 #define FILTER_PRED_IS_RIGHT (1 << 15)
1484 #define FILTER_PRED_FOLD (1 << 15)
1485
1486 /*
1487 * The max preds is the size of unsigned short with
1488 * two flags at the MSBs. One bit is used for both the IS_RIGHT
1489 * and FOLD flags. The other is reserved.
1490 *
1491 * 2^14 preds is way more than enough.
1492 */
1493 #define MAX_FILTER_PRED 16384
1494
1495 struct filter_pred;
1496 struct regex;
1497
1498 typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1499
1500 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1501
1502 enum regex_type {
1503 MATCH_FULL = 0,
1504 MATCH_FRONT_ONLY,
1505 MATCH_MIDDLE_ONLY,
1506 MATCH_END_ONLY,
1507 MATCH_GLOB,
1508 MATCH_INDEX,
1509 };
1510
1511 struct regex {
1512 char pattern[MAX_FILTER_STR_VAL];
1513 int len;
1514 int field_len;
1515 regex_match_func match;
1516 };
1517
1518 struct filter_pred {
1519 filter_pred_fn_t fn;
1520 u64 val;
1521 struct regex regex;
1522 unsigned short *ops;
1523 struct ftrace_event_field *field;
1524 int offset;
1525 int not;
1526 int op;
1527 };
1528
1529 static inline bool is_string_field(struct ftrace_event_field *field)
1530 {
1531 return field->filter_type == FILTER_DYN_STRING ||
1532 field->filter_type == FILTER_STATIC_STRING ||
1533 field->filter_type == FILTER_PTR_STRING ||
1534 field->filter_type == FILTER_COMM;
1535 }
1536
1537 static inline bool is_function_field(struct ftrace_event_field *field)
1538 {
1539 return field->filter_type == FILTER_TRACE_FN;
1540 }
1541
1542 extern enum regex_type
1543 filter_parse_regex(char *buff, int len, char **search, int *not);
1544 extern void print_event_filter(struct trace_event_file *file,
1545 struct trace_seq *s);
1546 extern int apply_event_filter(struct trace_event_file *file,
1547 char *filter_string);
1548 extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1549 char *filter_string);
1550 extern void print_subsystem_event_filter(struct event_subsystem *system,
1551 struct trace_seq *s);
1552 extern int filter_assign_type(const char *type);
1553 extern int create_event_filter(struct trace_array *tr,
1554 struct trace_event_call *call,
1555 char *filter_str, bool set_str,
1556 struct event_filter **filterp);
1557 extern void free_event_filter(struct event_filter *filter);
1558
1559 struct ftrace_event_field *
1560 trace_find_event_field(struct trace_event_call *call, char *name);
1561
1562 extern void trace_event_enable_cmd_record(bool enable);
1563 extern void trace_event_enable_tgid_record(bool enable);
1564
1565 extern int event_trace_init(void);
1566 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1567 extern int event_trace_del_tracer(struct trace_array *tr);
1568
1569 extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1570 const char *system,
1571 const char *event);
1572 extern struct trace_event_file *find_event_file(struct trace_array *tr,
1573 const char *system,
1574 const char *event);
1575
1576 static inline void *event_file_data(struct file *filp)
1577 {
1578 return READ_ONCE(file_inode(filp)->i_private);
1579 }
1580
1581 extern struct mutex event_mutex;
1582 extern struct list_head ftrace_events;
1583
1584 extern const struct file_operations event_trigger_fops;
1585 extern const struct file_operations event_hist_fops;
1586
1587 #ifdef CONFIG_HIST_TRIGGERS
1588 extern int register_trigger_hist_cmd(void);
1589 extern int register_trigger_hist_enable_disable_cmds(void);
1590 #else
1591 static inline int register_trigger_hist_cmd(void) { return 0; }
1592 static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1593 #endif
1594
1595 extern int register_trigger_cmds(void);
1596 extern void clear_event_triggers(struct trace_array *tr);
1597
1598 struct event_trigger_data {
1599 unsigned long count;
1600 int ref;
1601 struct event_trigger_ops *ops;
1602 struct event_command *cmd_ops;
1603 struct event_filter __rcu *filter;
1604 char *filter_str;
1605 void *private_data;
1606 bool paused;
1607 bool paused_tmp;
1608 struct list_head list;
1609 char *name;
1610 struct list_head named_list;
1611 struct event_trigger_data *named_data;
1612 };
1613
1614 /* Avoid typos */
1615 #define ENABLE_EVENT_STR "enable_event"
1616 #define DISABLE_EVENT_STR "disable_event"
1617 #define ENABLE_HIST_STR "enable_hist"
1618 #define DISABLE_HIST_STR "disable_hist"
1619
1620 struct enable_trigger_data {
1621 struct trace_event_file *file;
1622 bool enable;
1623 bool hist;
1624 };
1625
1626 extern int event_enable_trigger_print(struct seq_file *m,
1627 struct event_trigger_ops *ops,
1628 struct event_trigger_data *data);
1629 extern void event_enable_trigger_free(struct event_trigger_ops *ops,
1630 struct event_trigger_data *data);
1631 extern int event_enable_trigger_func(struct event_command *cmd_ops,
1632 struct trace_event_file *file,
1633 char *glob, char *cmd, char *param);
1634 extern int event_enable_register_trigger(char *glob,
1635 struct event_trigger_ops *ops,
1636 struct event_trigger_data *data,
1637 struct trace_event_file *file);
1638 extern void event_enable_unregister_trigger(char *glob,
1639 struct event_trigger_ops *ops,
1640 struct event_trigger_data *test,
1641 struct trace_event_file *file);
1642 extern void trigger_data_free(struct event_trigger_data *data);
1643 extern int event_trigger_init(struct event_trigger_ops *ops,
1644 struct event_trigger_data *data);
1645 extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1646 int trigger_enable);
1647 extern void update_cond_flag(struct trace_event_file *file);
1648 extern int set_trigger_filter(char *filter_str,
1649 struct event_trigger_data *trigger_data,
1650 struct trace_event_file *file);
1651 extern struct event_trigger_data *find_named_trigger(const char *name);
1652 extern bool is_named_trigger(struct event_trigger_data *test);
1653 extern int save_named_trigger(const char *name,
1654 struct event_trigger_data *data);
1655 extern void del_named_trigger(struct event_trigger_data *data);
1656 extern void pause_named_trigger(struct event_trigger_data *data);
1657 extern void unpause_named_trigger(struct event_trigger_data *data);
1658 extern void set_named_trigger_data(struct event_trigger_data *data,
1659 struct event_trigger_data *named_data);
1660 extern struct event_trigger_data *
1661 get_named_trigger_data(struct event_trigger_data *data);
1662 extern int register_event_command(struct event_command *cmd);
1663 extern int unregister_event_command(struct event_command *cmd);
1664 extern int register_trigger_hist_enable_disable_cmds(void);
1665
1666 /**
1667 * struct event_trigger_ops - callbacks for trace event triggers
1668 *
1669 * The methods in this structure provide per-event trigger hooks for
1670 * various trigger operations.
1671 *
1672 * All the methods below, except for @init() and @free(), must be
1673 * implemented.
1674 *
1675 * @func: The trigger 'probe' function called when the triggering
1676 * event occurs. The data passed into this callback is the data
1677 * that was supplied to the event_command @reg() function that
1678 * registered the trigger (see struct event_command) along with
1679 * the trace record, rec.
1680 *
1681 * @init: An optional initialization function called for the trigger
1682 * when the trigger is registered (via the event_command reg()
1683 * function). This can be used to perform per-trigger
1684 * initialization such as incrementing a per-trigger reference
1685 * count, for instance. This is usually implemented by the
1686 * generic utility function @event_trigger_init() (see
1687 * trace_event_triggers.c).
1688 *
1689 * @free: An optional de-initialization function called for the
1690 * trigger when the trigger is unregistered (via the
1691 * event_command @reg() function). This can be used to perform
1692 * per-trigger de-initialization such as decrementing a
1693 * per-trigger reference count and freeing corresponding trigger
1694 * data, for instance. This is usually implemented by the
1695 * generic utility function @event_trigger_free() (see
1696 * trace_event_triggers.c).
1697 *
1698 * @print: The callback function invoked to have the trigger print
1699 * itself. This is usually implemented by a wrapper function
1700 * that calls the generic utility function @event_trigger_print()
1701 * (see trace_event_triggers.c).
1702 */
1703 struct event_trigger_ops {
1704 void (*func)(struct event_trigger_data *data,
1705 void *rec,
1706 struct ring_buffer_event *rbe);
1707 int (*init)(struct event_trigger_ops *ops,
1708 struct event_trigger_data *data);
1709 void (*free)(struct event_trigger_ops *ops,
1710 struct event_trigger_data *data);
1711 int (*print)(struct seq_file *m,
1712 struct event_trigger_ops *ops,
1713 struct event_trigger_data *data);
1714 };
1715
1716 /**
1717 * struct event_command - callbacks and data members for event commands
1718 *
1719 * Event commands are invoked by users by writing the command name
1720 * into the 'trigger' file associated with a trace event. The
1721 * parameters associated with a specific invocation of an event
1722 * command are used to create an event trigger instance, which is
1723 * added to the list of trigger instances associated with that trace
1724 * event. When the event is hit, the set of triggers associated with
1725 * that event is invoked.
1726 *
1727 * The data members in this structure provide per-event command data
1728 * for various event commands.
1729 *
1730 * All the data members below, except for @post_trigger, must be set
1731 * for each event command.
1732 *
1733 * @name: The unique name that identifies the event command. This is
1734 * the name used when setting triggers via trigger files.
1735 *
1736 * @trigger_type: A unique id that identifies the event command
1737 * 'type'. This value has two purposes, the first to ensure that
1738 * only one trigger of the same type can be set at a given time
1739 * for a particular event e.g. it doesn't make sense to have both
1740 * a traceon and traceoff trigger attached to a single event at
1741 * the same time, so traceon and traceoff have the same type
1742 * though they have different names. The @trigger_type value is
1743 * also used as a bit value for deferring the actual trigger
1744 * action until after the current event is finished. Some
1745 * commands need to do this if they themselves log to the trace
1746 * buffer (see the @post_trigger() member below). @trigger_type
1747 * values are defined by adding new values to the trigger_type
1748 * enum in include/linux/trace_events.h.
1749 *
1750 * @flags: See the enum event_command_flags below.
1751 *
1752 * All the methods below, except for @set_filter() and @unreg_all(),
1753 * must be implemented.
1754 *
1755 * @func: The callback function responsible for parsing and
1756 * registering the trigger written to the 'trigger' file by the
1757 * user. It allocates the trigger instance and registers it with
1758 * the appropriate trace event. It makes use of the other
1759 * event_command callback functions to orchestrate this, and is
1760 * usually implemented by the generic utility function
1761 * @event_trigger_callback() (see trace_event_triggers.c).
1762 *
1763 * @reg: Adds the trigger to the list of triggers associated with the
1764 * event, and enables the event trigger itself, after
1765 * initializing it (via the event_trigger_ops @init() function).
1766 * This is also where commands can use the @trigger_type value to
1767 * make the decision as to whether or not multiple instances of
1768 * the trigger should be allowed. This is usually implemented by
1769 * the generic utility function @register_trigger() (see
1770 * trace_event_triggers.c).
1771 *
1772 * @unreg: Removes the trigger from the list of triggers associated
1773 * with the event, and disables the event trigger itself, after
1774 * initializing it (via the event_trigger_ops @free() function).
1775 * This is usually implemented by the generic utility function
1776 * @unregister_trigger() (see trace_event_triggers.c).
1777 *
1778 * @unreg_all: An optional function called to remove all the triggers
1779 * from the list of triggers associated with the event. Called
1780 * when a trigger file is opened in truncate mode.
1781 *
1782 * @set_filter: An optional function called to parse and set a filter
1783 * for the trigger. If no @set_filter() method is set for the
1784 * event command, filters set by the user for the command will be
1785 * ignored. This is usually implemented by the generic utility
1786 * function @set_trigger_filter() (see trace_event_triggers.c).
1787 *
1788 * @get_trigger_ops: The callback function invoked to retrieve the
1789 * event_trigger_ops implementation associated with the command.
1790 */
1791 struct event_command {
1792 struct list_head list;
1793 char *name;
1794 enum event_trigger_type trigger_type;
1795 int flags;
1796 int (*func)(struct event_command *cmd_ops,
1797 struct trace_event_file *file,
1798 char *glob, char *cmd, char *params);
1799 int (*reg)(char *glob,
1800 struct event_trigger_ops *ops,
1801 struct event_trigger_data *data,
1802 struct trace_event_file *file);
1803 void (*unreg)(char *glob,
1804 struct event_trigger_ops *ops,
1805 struct event_trigger_data *data,
1806 struct trace_event_file *file);
1807 void (*unreg_all)(struct trace_event_file *file);
1808 int (*set_filter)(char *filter_str,
1809 struct event_trigger_data *data,
1810 struct trace_event_file *file);
1811 struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1812 };
1813
1814 /**
1815 * enum event_command_flags - flags for struct event_command
1816 *
1817 * @POST_TRIGGER: A flag that says whether or not this command needs
1818 * to have its action delayed until after the current event has
1819 * been closed. Some triggers need to avoid being invoked while
1820 * an event is currently in the process of being logged, since
1821 * the trigger may itself log data into the trace buffer. Thus
1822 * we make sure the current event is committed before invoking
1823 * those triggers. To do that, the trigger invocation is split
1824 * in two - the first part checks the filter using the current
1825 * trace record; if a command has the @post_trigger flag set, it
1826 * sets a bit for itself in the return value, otherwise it
1827 * directly invokes the trigger. Once all commands have been
1828 * either invoked or set their return flag, the current record is
1829 * either committed or discarded. At that point, if any commands
1830 * have deferred their triggers, those commands are finally
1831 * invoked following the close of the current event. In other
1832 * words, if the event_trigger_ops @func() probe implementation
1833 * itself logs to the trace buffer, this flag should be set,
1834 * otherwise it can be left unspecified.
1835 *
1836 * @NEEDS_REC: A flag that says whether or not this command needs
1837 * access to the trace record in order to perform its function,
1838 * regardless of whether or not it has a filter associated with
1839 * it (filters make a trigger require access to the trace record
1840 * but are not always present).
1841 */
1842 enum event_command_flags {
1843 EVENT_CMD_FL_POST_TRIGGER = 1,
1844 EVENT_CMD_FL_NEEDS_REC = 2,
1845 };
1846
1847 static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1848 {
1849 return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1850 }
1851
1852 static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1853 {
1854 return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1855 }
1856
1857 extern int trace_event_enable_disable(struct trace_event_file *file,
1858 int enable, int soft_disable);
1859 extern int tracing_alloc_snapshot(void);
1860 extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
1861 extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
1862
1863 extern int tracing_snapshot_cond_disable(struct trace_array *tr);
1864 extern void *tracing_cond_snapshot_data(struct trace_array *tr);
1865
1866 extern const char *__start___trace_bprintk_fmt[];
1867 extern const char *__stop___trace_bprintk_fmt[];
1868
1869 extern const char *__start___tracepoint_str[];
1870 extern const char *__stop___tracepoint_str[];
1871
1872 void trace_printk_control(bool enabled);
1873 void trace_printk_init_buffers(void);
1874 void trace_printk_start_comm(void);
1875 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1876 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1877
1878 #define MAX_EVENT_NAME_LEN 64
1879
1880 extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
1881 extern ssize_t trace_parse_run_command(struct file *file,
1882 const char __user *buffer, size_t count, loff_t *ppos,
1883 int (*createfn)(int, char**));
1884
1885 extern unsigned int err_pos(char *cmd, const char *str);
1886 extern void tracing_log_err(struct trace_array *tr,
1887 const char *loc, const char *cmd,
1888 const char **errs, u8 type, u8 pos);
1889
1890 /*
1891 * Normal trace_printk() and friends allocates special buffers
1892 * to do the manipulation, as well as saves the print formats
1893 * into sections to display. But the trace infrastructure wants
1894 * to use these without the added overhead at the price of being
1895 * a bit slower (used mainly for warnings, where we don't care
1896 * about performance). The internal_trace_puts() is for such
1897 * a purpose.
1898 */
1899 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
1900
1901 #undef FTRACE_ENTRY
1902 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
1903 extern struct trace_event_call \
1904 __aligned(4) event_##call;
1905 #undef FTRACE_ENTRY_DUP
1906 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
1907 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1908 filter)
1909 #undef FTRACE_ENTRY_PACKED
1910 #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
1911 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1912 filter)
1913
1914 #include "trace_entries.h"
1915
1916 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1917 int perf_ftrace_event_register(struct trace_event_call *call,
1918 enum trace_reg type, void *data);
1919 #else
1920 #define perf_ftrace_event_register NULL
1921 #endif
1922
1923 #ifdef CONFIG_FTRACE_SYSCALLS
1924 void init_ftrace_syscalls(void);
1925 const char *get_syscall_name(int syscall);
1926 #else
1927 static inline void init_ftrace_syscalls(void) { }
1928 static inline const char *get_syscall_name(int syscall)
1929 {
1930 return NULL;
1931 }
1932 #endif
1933
1934 #ifdef CONFIG_EVENT_TRACING
1935 void trace_event_init(void);
1936 void trace_event_eval_update(struct trace_eval_map **map, int len);
1937 #else
1938 static inline void __init trace_event_init(void) { }
1939 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
1940 #endif
1941
1942 #ifdef CONFIG_TRACER_SNAPSHOT
1943 void tracing_snapshot_instance(struct trace_array *tr);
1944 int tracing_alloc_snapshot_instance(struct trace_array *tr);
1945 #else
1946 static inline void tracing_snapshot_instance(struct trace_array *tr) { }
1947 static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
1948 {
1949 return 0;
1950 }
1951 #endif
1952
1953 #ifdef CONFIG_PREEMPT_TRACER
1954 void tracer_preempt_on(unsigned long a0, unsigned long a1);
1955 void tracer_preempt_off(unsigned long a0, unsigned long a1);
1956 #else
1957 static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
1958 static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
1959 #endif
1960 #ifdef CONFIG_IRQSOFF_TRACER
1961 void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
1962 void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
1963 #else
1964 static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
1965 static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
1966 #endif
1967
1968 extern struct trace_iterator *tracepoint_print_iter;
1969
1970 /*
1971 * Reset the state of the trace_iterator so that it can read consumed data.
1972 * Normally, the trace_iterator is used for reading the data when it is not
1973 * consumed, and must retain state.
1974 */
1975 static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
1976 {
1977 const size_t offset = offsetof(struct trace_iterator, seq);
1978
1979 /*
1980 * Keep gcc from complaining about overwriting more than just one
1981 * member in the structure.
1982 */
1983 memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
1984
1985 iter->pos = -1;
1986 }
1987
1988 #endif /* _LINUX_KERNEL_TRACE_H */