/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct array_buffer;
struct tracer;
struct dentry;
struct bpf_prog;

const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
				  unsigned long flags,
				  const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				    const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
				      unsigned long long flags,
				      const struct trace_print_flags_u64 *flag_array);

const char *trace_print_symbols_seq_u64(struct trace_seq *p,
					unsigned long long val,
					const struct trace_print_flags_u64
					*symbol_array);
#endif

const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
				    unsigned int bitmask_size);

const char *trace_print_hex_seq(struct trace_seq *p,
				const unsigned char *buf, int len,
				bool concatenate);

const char *trace_print_array_seq(struct trace_seq *p,
				  const void *buf, int count,
				  size_t el_size);

const char *
trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
			 int prefix_type, int rowsize, int groupsize,
			 const void *buf, size_t len, bool ascii);

struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *event);
extern __printf(2, 3)
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

#define TRACE_EVENT_TYPE_MAX						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)

/*
 * Trace iterator - used by printout routines that present trace
 * results to users; these routines might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	struct array_buffer	*array_buffer;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;
	void			*temp;	/* temp holder */
	unsigned int		temp_size;
	char			*fmt;	/* modified format holder */
	unsigned int		fmt_size;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* set to true when the currently open file is a snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	/* All new fields here will be zeroed out in pipe_read */
};

enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};


typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags, struct trace_event *event);

struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

struct trace_event {
	struct hlist_node	node;
	struct list_head	list;
	int			type;
	struct trace_event_functions *funcs;
};

extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);

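/*
 * A minimal sketch of registering a custom output format (all names
 * below are hypothetical, not part of this header): fill in a
 * trace_event_functions with at least a .trace callback, attach it to
 * a trace_event, and register it. register_trace_event() assigns and
 * returns the event type number, or 0 on failure:
 *
 *	static enum print_line_t
 *	my_event_print(struct trace_iterator *iter, int flags,
 *		       struct trace_event *event)
 *	{
 *		trace_seq_puts(&iter->seq, "my event fired\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 *
 *	static struct trace_event_functions my_event_funcs = {
 *		.trace	= my_event_print,
 *	};
 *
 *	static struct trace_event my_event = {
 *		.funcs	= &my_event_funcs,
 *	};
 *
 *	int my_event_type = register_trace_event(&my_event);
 */
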
/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};

enum print_line_t trace_handle_return(struct trace_seq *s);

static inline void tracing_generic_entry_update(struct trace_entry *entry,
						unsigned short type,
						unsigned int trace_ctx)
{
	entry->preempt_count	= trace_ctx & 0xff;
	entry->pid		= current->pid;
	entry->type		= type;
	entry->flags		= trace_ctx >> 16;
}

unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
};

#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
	unsigned int irq_status = irqs_disabled_flags(irqflags) ?
		TRACE_FLAG_IRQS_OFF : 0;
	return tracing_gen_ctx_irq_test(irq_status);
}
static inline unsigned int tracing_gen_ctx(void)
{
	unsigned long irqflags;

	local_save_flags(irqflags);
	return tracing_gen_ctx_flags(irqflags);
}
#else

static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
	return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
}
static inline unsigned int tracing_gen_ctx(void)
{
	return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
}
#endif

static inline unsigned int tracing_gen_ctx_dec(void)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();
	/*
	 * Subtract one from the preemption counter if preemption is enabled,
	 * see trace_event_buffer_reserve() for details.
	 */
	if (IS_ENABLED(CONFIG_PREEMPTION))
		trace_ctx--;
	return trace_ctx;
}

struct trace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned int trace_ctx);

#define TRACE_RECORD_CMDLINE	BIT(0)
#define TRACE_RECORD_TGID	BIT(1)

void tracing_record_taskinfo(struct task_struct *task, int flags);
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags);

void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);

struct event_filter;

enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	/*
	 * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
	 * custom action was taken and the default action is not to be
	 * performed.
	 */
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;

#define TRACE_FUNCTION_TYPE ((const char *)~0UL)

struct trace_event_fields {
	const char *type;
	union {
		struct {
			const char *name;
			const int  size;
			const int  align;
			const int  is_signed;
			const int  filter_type;
		};
		int (*define_fields)(struct trace_event_call *);
	};
};

struct trace_event_class {
	const char		*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct trace_event_call *event,
				       enum trace_reg type, void *data);
	struct trace_event_fields *fields_array;
	struct list_head	*(*get_fields)(struct trace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct trace_event_call *);
};

extern int trace_event_reg(struct trace_event_call *event,
			   enum trace_reg type, void *data);

struct trace_event_buffer {
	struct trace_buffer		*buffer;
	struct ring_buffer_event	*event;
	struct trace_event_file		*trace_file;
	void				*entry;
	unsigned int			trace_ctx;
	struct pt_regs			*regs;
};

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
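
/*
 * Simplified sketch of the reserve/commit pattern that generated
 * TRACE_EVENT probes follow ("struct my_entry" and its field are
 * hypothetical; a real probe uses its generated trace_event_raw_*
 * type and adds any dynamic-array size to the length):
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->my_field = my_value;
 *	trace_event_buffer_commit(&fbuffer);
 */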

enum {
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
	TRACE_EVENT_FL_TRACEPOINT_BIT,
	TRACE_EVENT_FL_DYNAMIC_BIT,
	TRACE_EVENT_FL_KPROBE_BIT,
	TRACE_EVENT_FL_UPROBE_BIT,
	TRACE_EVENT_FL_EPROBE_BIT,
};

/*
 * Event flags:
 *  FILTERED	  - The event has a filter attached
 *  CAP_ANY	  - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  TRACEPOINT	  - Event is a tracepoint
 *  DYNAMIC	  - Event is a dynamic event (created at run time)
 *  KPROBE	  - Event is a kprobe
 *  UPROBE	  - Event is a uprobe
 *  EPROBE	  - Event is an event probe
 */
enum {
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
	TRACE_EVENT_FL_DYNAMIC		= (1 << TRACE_EVENT_FL_DYNAMIC_BIT),
	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
	TRACE_EVENT_FL_EPROBE		= (1 << TRACE_EVENT_FL_EPROBE_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)

struct trace_event_call {
	struct list_head	list;
	struct trace_event_class *class;
	union {
		char			*name;
		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
		struct tracepoint	*tp;
	};
	struct trace_event	event;
	char			*print_fmt;
	struct event_filter	*filter;
	/*
	 * Static events can disappear with modules,
	 * whereas dynamic ones need their own ref count.
	 */
	union {
		void			*module;
		atomic_t		refcnt;
	};
	void			*data;

	/* See the TRACE_EVENT_FL_* flags above */
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	int				perf_refcount;
	struct hlist_head __percpu	*perf_events;
	struct bpf_prog_array __rcu	*prog_array;

	int	(*perf_perm)(struct trace_event_call *,
			     struct perf_event *);
#endif
};

#ifdef CONFIG_DYNAMIC_EVENTS
bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
void trace_event_dyn_put_ref(struct trace_event_call *call);
bool trace_event_dyn_busy(struct trace_event_call *call);
#else
static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call)
{
	/* Without DYNAMIC_EVENTS configured, nothing should be calling this */
	return false;
}
static inline void trace_event_dyn_put_ref(struct trace_event_call *call)
{
}
static inline bool trace_event_dyn_busy(struct trace_event_call *call)
{
	/* Nothing should call this without DYNAMIC_EVENTS configured. */
	return true;
}
#endif

static inline bool trace_event_try_get_ref(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
		return trace_event_dyn_try_get_ref(call);
	else
		return try_module_get(call->module);
}

static inline void trace_event_put_ref(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
		trace_event_dyn_put_ref(call);
	else
		module_put(call->module);
}

#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
	/*
	 * This inline function checks whether call->prog_array
	 * is valid or not. The function is called in various places,
	 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
	 *
	 * If this function returns true, and later call->prog_array
	 * becomes NULL inside the rcu_read_lock/unlock region,
	 * we bail out then. If this function returns false,
	 * there is a risk that we might miss a few events if the checking
	 * were delayed until inside the rcu_read_lock/unlock region and
	 * call->prog_array happened to become non-NULL then.
	 *
	 * Here, READ_ONCE() is used instead of rcu_access_pointer().
	 * rcu_access_pointer() requires the actual definition of
	 * "struct bpf_prog_array" while READ_ONCE() only needs
	 * a declaration of the same type.
	 */
	return !!READ_ONCE(call->prog_array);
}
#endif

static inline const char *
trace_event_name(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
		return call->tp ? call->tp->name : NULL;
	else
		return call->name;
}

static inline struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

struct trace_subsystem_dir;

enum {
	EVENT_FILE_FL_ENABLED_BIT,
	EVENT_FILE_FL_RECORDED_CMD_BIT,
	EVENT_FILE_FL_RECORDED_TGID_BIT,
	EVENT_FILE_FL_FILTERED_BIT,
	EVENT_FILE_FL_NO_SET_FILTER_BIT,
	EVENT_FILE_FL_SOFT_MODE_BIT,
	EVENT_FILE_FL_SOFT_DISABLED_BIT,
	EVENT_FILE_FL_TRIGGER_MODE_BIT,
	EVENT_FILE_FL_TRIGGER_COND_BIT,
	EVENT_FILE_FL_PID_FILTER_BIT,
	EVENT_FILE_FL_WAS_ENABLED_BIT,
};

extern struct trace_event_file *trace_get_event_file(const char *instance,
						     const char *system,
						     const char *event);
extern void trace_put_event_file(struct trace_event_file *file);

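/*
 * Sketch of looking up an event file and releasing it. A NULL instance
 * name means the top-level trace array; on failure an ERR_PTR() is
 * returned, and a successfully obtained file must be released with
 * trace_put_event_file() when no longer needed:
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "sched", "sched_switch");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...
 *	trace_put_event_file(file);
 */
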
#define MAX_DYNEVENT_CMD_LEN	(2048)

enum dynevent_type {
	DYNEVENT_TYPE_SYNTH = 1,
	DYNEVENT_TYPE_KPROBE,
	DYNEVENT_TYPE_NONE,
};

struct dynevent_cmd;

typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);

struct dynevent_cmd {
	struct seq_buf		seq;
	const char		*event_name;
	unsigned int		n_fields;
	enum dynevent_type	type;
	dynevent_create_fn_t	run_command;
	void			*private_data;
};

extern int dynevent_create(struct dynevent_cmd *cmd);

extern int synth_event_delete(const char *name);

extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
				 char *buf, int maxlen);

extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
				       const char *name,
				       struct module *mod, ...);

#define synth_event_gen_cmd_start(cmd, name, mod, ...)	\
	__synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)

struct synth_field_desc {
	const char *type;
	const char *name;
};

extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
					   const char *name,
					   struct module *mod,
					   struct synth_field_desc *fields,
					   unsigned int n_fields);
extern int synth_event_create(const char *name,
			      struct synth_field_desc *fields,
			      unsigned int n_fields, struct module *mod);

extern int synth_event_add_field(struct dynevent_cmd *cmd,
				 const char *type,
				 const char *name);
extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
				     const char *type_name);
extern int synth_event_add_fields(struct dynevent_cmd *cmd,
				  struct synth_field_desc *fields,
				  unsigned int n_fields);

#define synth_event_gen_cmd_end(cmd)	\
	dynevent_create(cmd)
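
/*
 * Sketch of generating a synthetic event from kernel code (the event
 * and field names are illustrative; error handling is elided). Fields
 * are given to the gen_cmd_start() call as type/name string pairs,
 * and more can be appended before the command is finalized:
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "schedtest", THIS_MODULE,
 *					"pid_t", "next_pid_field",
 *					"u64", "ts_ns");
 *	ret = synth_event_add_field(&cmd, "unsigned int", "irq");
 *	ret = synth_event_gen_cmd_end(&cmd);
 */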

struct synth_event;

struct synth_event_trace_state {
	struct trace_event_buffer fbuffer;
	struct synth_trace_event *entry;
	struct trace_buffer	*buffer;
	struct synth_event	*event;
	unsigned int		cur_field;
	unsigned int		n_u64;
	bool			disabled;
	bool			add_next;
	bool			add_name;
};

extern int synth_event_trace(struct trace_event_file *file,
			     unsigned int n_vals, ...);
extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
				   unsigned int n_vals);
extern int synth_event_trace_start(struct trace_event_file *file,
				   struct synth_event_trace_state *trace_state);
extern int synth_event_add_next_val(u64 val,
				    struct synth_event_trace_state *trace_state);
extern int synth_event_add_val(const char *field_name, u64 val,
			       struct synth_event_trace_state *trace_state);
extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);
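
/*
 * Sketch of tracing a synthetic event, either with all values at once
 * (in field-definition order) or piecewise ("file" as obtained from
 * trace_get_event_file(); error handling elided). Note that
 * synth_event_add_next_val() and synth_event_add_val() cannot be
 * mixed within a single trace:
 *
 *	ret = synth_event_trace(file, 3, 777, 1000, 2000);
 *
 * or:
 *
 *	struct synth_event_trace_state state;
 *
 *	ret = synth_event_trace_start(file, &state);
 *	ret = synth_event_add_next_val(777, &state);
 *	ret = synth_event_add_next_val(1000, &state);
 *	ret = synth_event_add_next_val(2000, &state);
 *	ret = synth_event_trace_end(&state);
 */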

extern int kprobe_event_delete(const char *name);

extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
				  char *buf, int maxlen);

#define kprobe_event_gen_cmd_start(cmd, name, loc, ...)			\
	__kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)

#define kretprobe_event_gen_cmd_start(cmd, name, loc, ...)		\
	__kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)

extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
					bool kretprobe,
					const char *name,
					const char *loc, ...);

#define kprobe_event_add_fields(cmd, ...)	\
	__kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)

#define kprobe_event_add_field(cmd, field)	\
	__kprobe_event_add_fields(cmd, field, NULL)

extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);

#define kprobe_event_gen_cmd_end(cmd)		\
	dynevent_create(cmd)

#define kretprobe_event_gen_cmd_end(cmd)	\
	dynevent_create(cmd)
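
/*
 * Sketch of generating a kprobe event from kernel code, mirroring the
 * synthetic-event flow above (the event name, probe location and fetch
 * args are illustrative; error handling is elided):
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
 *					 "do_sys_open",
 *					 "dfd=%ax", "filename=%dx");
 *	ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
 *	ret = kprobe_event_gen_cmd_end(&cmd);
 *
 * The event can later be removed with kprobe_event_delete("gen_kprobe_test").
 */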

/*
 * Event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  RECORDED_TGID - The tgids should be recorded at sched_switch
 *  FILTERED	  - The event has a filter attached
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  SOFT_MODE	  - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *		    tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 *  PID_FILTER	  - When set, the event is filtered based on pid
 *  WAS_ENABLED	  - Set when enabled to know to clear trace on module removal
 */
enum {
	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
	EVENT_FILE_FL_RECORDED_TGID	= (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
};

struct trace_event_file {
	struct list_head		list;
	struct trace_event_call		*event_call;
	struct event_filter __rcu	*filter;
	struct dentry			*dir;
	struct trace_array		*tr;
	struct trace_subsystem_dir	*system;
	struct list_head		triggers;

	/*
	 * 32 bit flags:
	 *   bit 0:		enabled
	 *   bit 1:		enabled cmd record
	 *   bit 2:		enable/disable with the soft disable bit
	 *   bit 3:		soft disabled
	 *   bit 4:		trigger enabled
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	atomic_t		sm_ref;	/* soft-mode reference counter */
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};

#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags |= value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
	static int perf_perm_##name(struct trace_event_call *tp_event, \
				    struct perf_event *p_event)		\
	{								\
		return ({ expr; });					\
	}								\
	static int __init trace_init_perf_perm_##name(void)		\
	{								\
		event_##name.perf_perm = &perf_perm_##name;		\
		return 0;						\
	}								\
	early_initcall(trace_init_perf_perm_##name);
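
/*
 * Sketch of a perf permission hook (the event name "my_event" is
 * hypothetical; users normally go through the TRACE_EVENT_PERF_PERM()
 * wrapper in the trace headers rather than the __ variant directly).
 * "expr" sees the tp_event and p_event arguments and must evaluate to
 * 0 to allow the perf event, or a negative errno to reject it:
 *
 *	__TRACE_EVENT_PERF_PERM(my_event,
 *		is_sampling_event(p_event) ? -EPERM : 0);
 */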

#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_STR_VAL	256U	/* Should handle KSYM_SYMBOL_LEN */

enum event_trigger_type {
	ETT_NONE		= (0),
	ETT_TRACE_ONOFF		= (1 << 0),
	ETT_SNAPSHOT		= (1 << 1),
	ETT_STACKTRACE		= (1 << 2),
	ETT_EVENT_ENABLE	= (1 << 3),
	ETT_EVENT_HIST		= (1 << 4),
	ETT_HIST_ENABLE		= (1 << 5),
	ETT_EVENT_EPROBE	= (1 << 6),
};

extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type
event_triggers_call(struct trace_event_file *file,
		    struct trace_buffer *buffer, void *rec,
		    struct ring_buffer_event *event);
extern void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);

/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
	unsigned long eflags = file->flags;

	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
			event_triggers_call(file, NULL, NULL, NULL);
		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
			return true;
		if (eflags & EVENT_FILE_FL_PID_FILTER)
			return trace_event_ignore_this_pid(file);
	}
	return false;
}

#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	return 1;
}

static inline int
perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie)
{
	return -EOPNOTSUPP;
}

static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }

static inline int
perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	return -EOPNOTSUPP;
}
static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
	return -EOPNOTSUPP;
}
static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
	return -EOPNOTSUPP;
}
static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	return NULL;
}
static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
}
static inline int bpf_get_perf_event_info(const struct perf_event *event,
					  u32 *prog_id, u32 *fd_type,
					  const char **buf, u64 *probe_offset,
					  u64 *probe_addr)
{
	return -EOPNOTSUPP;
}
#endif

enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
	FILTER_COMM,
	FILTER_CPU,
};

extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);

#define is_signed_type(type)	(((type)(-1)) < (type)1)

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable);
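
/*
 * Sketch of enabling an event from kernel code. For the top-level
 * trace instance:
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 *
 * For a specific instance ("tr" obtained elsewhere, e.g. via
 * trace_array_get_by_name()):
 *
 *	ret = trace_array_set_clr_event(tr, "sched", "sched_switch", true);
 */
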
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to initialize the static variable with fmt when fmt is not
 * a constant, even though the outer if statement would optimize it out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __section("__trace_printk_fmt") =			\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)

#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
DECLARE_PER_CPU(int, bpf_kprobe_override);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
#ifdef CONFIG_KPROBE_EVENTS
extern int  perf_kprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
			       u32 *fd_type, const char **symbol,
			       u64 *probe_offset, u64 *probe_addr,
			       bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
extern int  perf_uprobe_init(struct perf_event *event,
			     unsigned long ref_ctr_offset, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
			       u32 *fd_type, const char **filename,
			       u64 *probe_offset, bool perf_type_tracepoint);
#endif
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);

int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_free_bpf_prog(struct perf_event *event);

void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3);
void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4);
void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5);
void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6);
void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		    u64 arg8);
void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		    u64 arg8, u64 arg9);
void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10);
void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10, u64 arg11);
void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
			       struct trace_event_call *call, u64 count,
			       struct pt_regs *regs, struct hlist_head *head,
			       struct task_struct *task);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
		      u64 count, struct pt_regs *regs, void *head,
		      struct task_struct *task)
{
	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}

#endif

#endif /* _LINUX_TRACE_EVENT_H */