#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>
#include <linux/compiler.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
#include <asm/syscall.h>	/* some archs define it here */
#endif
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,

	__TRACE_LAST_TYPE,
};

#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef TP_ARGS
#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"
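
/*
 * For illustration (not part of the build): the function entry that
 * trace_entries.h declares as
 *
 *	FTRACE_ENTRY_REG(function, ftrace_entry,
 *		TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		...
 *	)
 *
 * expands through FTRACE_ENTRY() above to roughly:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */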

/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 *  PREEMPT_RESCHED	- a preemption-time reschedule is requested
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct tracer;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	cycle_t				time_start;
	int				cpu;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer and
	 * the buffers are reset for the trace_buffer so the tracing can
	 * continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
	unsigned long		max_latency;
#endif
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr(),
	 * so it needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	struct tracer		*current_trace;
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	/* function tracing enabled */
	int			function_enabled;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item, and "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
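
/*
 * Typical use in output code (a sketch): verify and cast iter->ent to
 * the entry type expected for the current record.
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(s, "%lx <-- %lx\n", field->ip, field->parent_ip);
 *
 * If var's type is not in the list above, the call fails to link via
 * __ftrace_bad_type(); a mismatched runtime type id trips the WARN_ON()
 * in IF_ASSIGN().
 */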

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
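
/*
 * A tracer declares its private options with TRACER_OPT(); the option
 * name and bit below are illustrative only:
 *
 *	static struct tracer_opt my_tracer_opts[] = {
 *		{ TRACER_OPT(mytracer-verbose, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_tracer_flags = {
 *		.val = 0,
 *		.opts = my_tracer_opts,
 *	};
 *
 * The empty entry terminates the list; .val holds the initial state of
 * all option bits.
 */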

/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};
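
/*
 * A minimal tracer registration, sketched from the pattern the in-tree
 * tracers follow (all names here are hypothetical):
 *
 *	static int mytracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void mytracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer mytracer __tracer_data = {
 *		.name	= "mytracer",
 *		.init	= mytracer_init,
 *		.reset	= mytracer_reset,
 *	};
 *
 * Calling register_tracer(&mytracer) from an __init function makes it
 * selectable via "echo mytracer > current_tracer".
 */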

/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are taken:
 *   If arch does not support an ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits,
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_CONTROL_BIT,

	/*
	 * Abuse of the trace_recursion: we need a way to maintain state
	 * if we are tracing the function graph in irq, because we want
	 * to trace a particular function that was called in irq context
	 * while we have irq tracing off. Since this can only be modified
	 * by current, we can reuse trace_recursion.
	 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
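
/*
 * Typical use in a function trace callback, sketched after the pattern in
 * trace_functions.c (do_trace_work() stands in for the real work):
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;
 *
 *	do_trace_work();
 *	trace_clear_recursion(bit);
 *
 * A negative return means this context already holds its recursion bit,
 * so the callback must bail out instead of recursing.
 */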

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)
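
/*
 * for_each_tracing_cpu() iterates over every CPU in tracing_buffer_mask.
 * A sketch (the disabled-counter bump is illustrative of how trace.c
 * walks per-cpu data):
 *
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		atomic_inc(&per_cpu_ptr(buf->data, cpu)->disabled);
 */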

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
					   unsigned long flags, int skip,
					   int pc, struct pt_regs *regs)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data __refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40
#define TRACE_GRAPH_PRINT_TAIL		0x80
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);


#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
extern int ftrace_graph_notrace_count;
extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_notrace_count)
		return 0;

	for (i = 0; i < ftrace_graph_notrace_count; i++) {
		if (addr == ftrace_graph_notrace_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
int using_ftrace_ops_list_func(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

int ftrace_event_is_function(struct ftrace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);
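
/*
 * Typical write() handler flow using the parser helpers, sketched after
 * the pattern in ftrace_regex_write() (the buffer size and consume_token()
 * are illustrative; error handling trimmed):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		consume_token(parser.buffer);
 *		trace_parser_clear(&parser);
 *	}
 *
 *	trace_parser_put(&parser);
 *	return read;
 */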

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_PRINTK		= 0x200,
	TRACE_ITER_PREEMPTONLY		= 0x400,
	TRACE_ITER_BRANCH		= 0x800,
	TRACE_ITER_ANNOTATE		= 0x1000,
	TRACE_ITER_USERSTACKTRACE	= 0x2000,
	TRACE_ITER_SYM_USEROBJ		= 0x4000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
	TRACE_ITER_CONTEXT_INFO		= 0x10000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x20000,
	TRACE_ITER_SLEEP_TIME		= 0x40000,
	TRACE_ITER_GRAPH_TIME		= 0x80000,
	TRACE_ITER_RECORD_CMD		= 0x100000,
	TRACE_ITER_OVERWRITE		= 0x200000,
	TRACE_ITER_STOP_ON_FREE		= 0x400000,
	TRACE_ITER_IRQ_INFO		= 0x800000,
	TRACE_ITER_MARKERS		= 0x1000000,
	TRACE_ITER_FUNCTION		= 0x2000000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct ftrace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
	int			ref_count;
	int			nr_events;
};

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is limited by the size of an unsigned short, with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags, and the other is reserved, leaving 14 bits for
 * the index.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int			offset;
	int			not;
	int			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct ftrace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct ftrace_event_file *find_event_file(struct trace_array *tr,
						 const char *system,
						 const char *event);

static inline void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

struct event_trigger_data {
	unsigned long			count;
	int				ref;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	char				*filter_str;
	void				*private_data;
	struct list_head		list;
};

/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs. The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command).
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command @reg()
 *	function). This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_events_trigger.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @unreg() function). This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_events_trigger.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself. This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_events_trigger.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};
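
/*
 * A concrete instance, sketched after the traceon trigger in
 * trace_events_trigger.c (treat the exact callback names as
 * illustrative):
 *
 *	static struct event_trigger_ops traceon_trigger_ops = {
 *		.func	= traceon_trigger,
 *		.print	= traceon_trigger_print,
 *		.init	= event_trigger_init,
 *		.free	= event_trigger_free,
 *	};
 */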

/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event. The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event. When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @post_trigger, must be set
 * for each event command.
 *
 * @name: The unique name that identifies the event command. This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'. This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names. The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished. Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see the @post_trigger() member below). @trigger_type
 *	values are defined by adding new values to the trigger_type
 *	enum in include/linux/ftrace_event.h.
 *
 * @post_trigger: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed. Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer. Thus
 *	we make sure the current event is committed before invoking
 *	those triggers. To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger. Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded. At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event. In other
 *	words, if the event_trigger_ops @func() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * All the methods below, except for @set_filter(), must be
 * implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user. It allocates the trigger instance and registers it with
 *	the appropriate trace event. It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_events_trigger.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed. This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_events_trigger.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	de-initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_events_trigger.c).
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger. If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored. This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_events_trigger.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	bool			post_trigger;
	int			(*func)(struct event_command *cmd_ops,
					struct ftrace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct ftrace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct ftrace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct ftrace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
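
/*
 * A concrete command, sketched after the "traceon" command in
 * trace_events_trigger.c (field values follow that file's pattern but
 * are not a verbatim copy):
 *
 *	static struct event_command trigger_traceon_cmd = {
 *		.name			= "traceon",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= onoff_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 * Commands like this are made live by register_trigger_cmds() at event
 * trace init time.
 */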

extern int trace_event_enable_disable(struct ftrace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct ftrace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#endif /* _LINUX_KERNEL_TRACE_H */