#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;
struct bpf_prog;

const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
                                  unsigned long flags,
                                  const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
                                    const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
                                      unsigned long long flags,
                                      const struct trace_print_flags_u64 *flag_array);

const char *trace_print_symbols_seq_u64(struct trace_seq *p,
                                        unsigned long long val,
                                        const struct trace_print_flags_u64 *symbol_array);
#endif

const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
                                    unsigned int bitmask_size);

const char *trace_print_hex_seq(struct trace_seq *p,
                                const unsigned char *buf, int len,
                                bool concatenate);

const char *trace_print_array_seq(struct trace_seq *p,
                                  const void *buf, int count,
                                  size_t el_size);

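/*
 * Illustrative only: the helpers above back the __print_flags(),
 * __print_symbolic(), __print_hex() etc. macros used in an event's
 * TP_printk(). A hypothetical event might format a flags field as:
 *
 *	TP_printk("flags=%s",
 *		  __print_flags(__entry->flags, "|",
 *				{ (1 << 0), "READ" },
 *				{ (1 << 1), "WRITE" }))
 */
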
struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
                          struct trace_event *event);

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
        unsigned short          type;
        unsigned char           flags;
        unsigned char           preempt_count;
        int                     pid;
};

#define TRACE_EVENT_TYPE_MAX                                            \
        ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)

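/*
 * Worked example (informative): ->type above is an unsigned short, so
 * sizeof(...) * 8 == 16 and TRACE_EVENT_TYPE_MAX == (1 << 16) - 1 == 65535.
 */
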
/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; such routines might sleep, etc:
 */
struct trace_iterator {
        struct trace_array      *tr;
        struct tracer           *trace;
        struct trace_buffer     *trace_buffer;
        void                    *private;
        int                     cpu_file;
        struct mutex            mutex;
        struct ring_buffer_iter **buffer_iter;
        unsigned long           iter_flags;

        /* trace_seq for __print_flags() and __print_symbolic() etc. */
        struct trace_seq        tmp_seq;

        cpumask_var_t           started;

        /* true when the currently open file is a snapshot */
        bool                    snapshot;

        /* The below is zeroed out in pipe_read */
        struct trace_seq        seq;
        struct trace_entry      *ent;
        unsigned long           lost_events;
        int                     leftover;
        int                     ent_size;
        int                     cpu;
        u64                     ts;

        loff_t                  pos;
        long                    idx;

        /* All new fields here will be zeroed out in pipe_read */
};

enum trace_iter_flags {
        TRACE_FILE_LAT_FMT      = 1,
        TRACE_FILE_ANNOTATE     = 2,
        TRACE_FILE_TIME_IN_NS   = 4,
};

typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
                                              int flags, struct trace_event *event);

struct trace_event_functions {
        trace_print_func        trace;
        trace_print_func        raw;
        trace_print_func        hex;
        trace_print_func        binary;
};

struct trace_event {
        struct hlist_node               node;
        struct list_head                list;
        int                             type;
        struct trace_event_functions    *funcs;
};

extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
        TRACE_TYPE_PARTIAL_LINE = 0,    /* Retry after flushing the seq */
        TRACE_TYPE_HANDLED      = 1,
        TRACE_TYPE_UNHANDLED    = 2,    /* Relay to other output functions */
        TRACE_TYPE_NO_CONSUME   = 3     /* Handled but ask to not consume */
};

enum print_line_t trace_handle_return(struct trace_seq *s);

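/*
 * Minimal sketch (illustrative, hypothetical names): a custom output
 * callback writes to iter->seq and lets trace_handle_return() map the
 * seq state to TRACE_TYPE_HANDLED or TRACE_TYPE_PARTIAL_LINE; the
 * trace_event is then hooked up with register_trace_event(), which
 * assigns a type number when ->type is left as 0:
 *
 *	static enum print_line_t
 *	my_event_output(struct trace_iterator *iter, int flags,
 *			struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "my event\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 *
 *	static struct trace_event_functions my_event_funcs = {
 *		.trace	= my_event_output,
 *	};
 *
 *	static struct trace_event my_event = {
 *		.funcs	= &my_event_funcs,
 *	};
 *
 *	if (!register_trace_event(&my_event))
 *		pr_warn("could not register trace event\n");
 */
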
void tracing_generic_entry_update(struct trace_entry *entry,
                                  unsigned long flags,
                                  int pc);
struct trace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
                                struct trace_event_file *trace_file,
                                int type, unsigned long len,
                                unsigned long flags, int pc);

void tracing_record_cmdline(struct task_struct *tsk);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);

struct event_filter;

enum trace_reg {
        TRACE_REG_REGISTER,
        TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
        TRACE_REG_PERF_REGISTER,
        TRACE_REG_PERF_UNREGISTER,
        TRACE_REG_PERF_OPEN,
        TRACE_REG_PERF_CLOSE,
        TRACE_REG_PERF_ADD,
        TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;

struct trace_event_class {
        const char              *system;
        void                    *probe;
#ifdef CONFIG_PERF_EVENTS
        void                    *perf_probe;
#endif
        int                     (*reg)(struct trace_event_call *event,
                                       enum trace_reg type, void *data);
        int                     (*define_fields)(struct trace_event_call *);
        struct list_head        *(*get_fields)(struct trace_event_call *);
        struct list_head        fields;
        int                     (*raw_init)(struct trace_event_call *);
};

extern int trace_event_reg(struct trace_event_call *event,
                           enum trace_reg type, void *data);

struct trace_event_buffer {
        struct ring_buffer              *buffer;
        struct ring_buffer_event        *event;
        struct trace_event_file         *trace_file;
        void                            *entry;
        unsigned long                   flags;
        int                             pc;
};

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
                                 struct trace_event_file *trace_file,
                                 unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);

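/*
 * Sketch of the usual reserve/fill/commit sequence (roughly what the
 * TRACE_EVENT() machinery generates; the entry layout and names here
 * are hypothetical):
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_trace_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->my_field = my_value;
 *	trace_event_buffer_commit(&fbuffer);
 */
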
enum {
        TRACE_EVENT_FL_FILTERED_BIT,
        TRACE_EVENT_FL_CAP_ANY_BIT,
        TRACE_EVENT_FL_NO_SET_FILTER_BIT,
        TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
        TRACE_EVENT_FL_WAS_ENABLED_BIT,
        TRACE_EVENT_FL_TRACEPOINT_BIT,
        TRACE_EVENT_FL_KPROBE_BIT,
        TRACE_EVENT_FL_UPROBE_BIT,
};

/*
 * Event flags:
 *  FILTERED      - The event has a filter attached
 *  CAP_ANY       - Any user can enable for perf
 *  NO_SET_FILTER - Set when the filter has an error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  WAS_ENABLED   - Set and stays set when an event was ever enabled
 *                  (used for module unloading: if a module's event was
 *                  enabled, it is best to clear the buffers that used it).
 *  TRACEPOINT    - Event is a tracepoint
 *  KPROBE        - Event is a kprobe
 *  UPROBE        - Event is a uprobe
 */
enum {
        TRACE_EVENT_FL_FILTERED         = (1 << TRACE_EVENT_FL_FILTERED_BIT),
        TRACE_EVENT_FL_CAP_ANY          = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
        TRACE_EVENT_FL_NO_SET_FILTER    = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
        TRACE_EVENT_FL_IGNORE_ENABLE    = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
        TRACE_EVENT_FL_WAS_ENABLED      = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
        TRACE_EVENT_FL_TRACEPOINT       = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
        TRACE_EVENT_FL_KPROBE           = (1 << TRACE_EVENT_FL_KPROBE_BIT),
        TRACE_EVENT_FL_UPROBE           = (1 << TRACE_EVENT_FL_UPROBE_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)

struct trace_event_call {
        struct list_head        list;
        struct trace_event_class *class;
        union {
                char                    *name;
                /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
                struct tracepoint       *tp;
        };
        struct trace_event      event;
        char                    *print_fmt;
        struct event_filter     *filter;
        void                    *mod;
        void                    *data;
        /*
         *   bit 0:     filter_active
         *   bit 1:     allow trace by non root (cap any)
         *   bit 2:     failed to apply filter
         *   bit 3:     trace internal event (do not enable)
         *   bit 4:     Event was enabled by module
         *   bit 5:     Event is a tracepoint
         *   bit 6:     Event is a kprobe
         *   bit 7:     Event is a uprobe
         */
        int                     flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
        int                             perf_refcount;
        struct hlist_head __percpu      *perf_events;
        struct bpf_prog                 *prog;

        int     (*perf_perm)(struct trace_event_call *,
                             struct perf_event *);
#endif
};

static inline const char *
trace_event_name(struct trace_event_call *call)
{
        if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
                return call->tp ? call->tp->name : NULL;
        else
                return call->name;
}

struct trace_array;
struct trace_subsystem_dir;

enum {
        EVENT_FILE_FL_ENABLED_BIT,
        EVENT_FILE_FL_RECORDED_CMD_BIT,
        EVENT_FILE_FL_FILTERED_BIT,
        EVENT_FILE_FL_NO_SET_FILTER_BIT,
        EVENT_FILE_FL_SOFT_MODE_BIT,
        EVENT_FILE_FL_SOFT_DISABLED_BIT,
        EVENT_FILE_FL_TRIGGER_MODE_BIT,
        EVENT_FILE_FL_TRIGGER_COND_BIT,
        EVENT_FILE_FL_PID_FILTER_BIT,
};

/*
 * Event file flags:
 *  ENABLED       - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  FILTERED      - The event has a filter attached
 *  NO_SET_FILTER - Set when the filter has an error and is to be ignored
 *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *                  tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 *  PID_FILTER    - When set, the event is filtered based on pid
 */
enum {
        EVENT_FILE_FL_ENABLED           = (1 << EVENT_FILE_FL_ENABLED_BIT),
        EVENT_FILE_FL_RECORDED_CMD      = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
        EVENT_FILE_FL_FILTERED          = (1 << EVENT_FILE_FL_FILTERED_BIT),
        EVENT_FILE_FL_NO_SET_FILTER     = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
        EVENT_FILE_FL_SOFT_MODE         = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
        EVENT_FILE_FL_SOFT_DISABLED     = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
        EVENT_FILE_FL_TRIGGER_MODE      = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
        EVENT_FILE_FL_TRIGGER_COND      = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
        EVENT_FILE_FL_PID_FILTER        = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
};

struct trace_event_file {
        struct list_head                list;
        struct trace_event_call         *event_call;
        struct event_filter             *filter;
        struct dentry                   *dir;
        struct trace_array              *tr;
        struct trace_subsystem_dir      *system;
        struct list_head                triggers;

        /*
         * 32 bit flags:
         *   bit 0:     enabled
         *   bit 1:     enabled cmd record
         *   bit 2:     enable/disable with the soft disable bit
         *   bit 3:     soft disabled
         *   bit 4:     trigger enabled
         *
         * Note: The bits must be set atomically to prevent races
         * from other writers. Reads of flags do not need to be in
         * sync as they occur in critical sections. But the way flags
         * is currently used, these changes do not affect the code
         * except that when a change is made, it may take a moment to
         * propagate to other CPUs due to caching and such. Which is
         * mostly OK ;-)
         */
        unsigned long           flags;
        atomic_t                sm_ref; /* soft-mode reference counter */
        atomic_t                tm_ref; /* trigger-mode reference counter */
};

#define __TRACE_EVENT_FLAGS(name, value)                                \
        static int __init trace_init_flags_##name(void)                \
        {                                                               \
                event_##name.flags |= value;                            \
                return 0;                                               \
        }                                                               \
        early_initcall(trace_init_flags_##name);

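/*
 * Informative example (event name hypothetical): the TRACE_EVENT_FLAGS()
 * wrapper in the tracepoint headers expands to __TRACE_EVENT_FLAGS() to
 * tag an event at boot, e.g. letting any user enable it for perf:
 *
 *	TRACE_EVENT_FLAGS(my_event, TRACE_EVENT_FL_CAP_ANY)
 */
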
#define __TRACE_EVENT_PERF_PERM(name, expr...)                          \
        static int perf_perm_##name(struct trace_event_call *tp_event, \
                                    struct perf_event *p_event)         \
        {                                                               \
                return ({ expr; });                                     \
        }                                                               \
        static int __init trace_init_perf_perm_##name(void)            \
        {                                                               \
                event_##name.perf_perm = &perf_perm_##name;             \
                return 0;                                               \
        }                                                               \
        early_initcall(trace_init_perf_perm_##name);

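/*
 * Informative example (event name hypothetical): the matching
 * TRACE_EVENT_PERF_PERM() wrapper supplies an expression that can veto
 * perf access to the event; tp_event and p_event are in scope:
 *
 *	TRACE_EVENT_PERF_PERM(my_event,
 *		is_sampling_event(p_event) ? -EPERM : 0)
 */
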
#define PERF_MAX_TRACE_SIZE     2048

#define MAX_FILTER_STR_VAL      256     /* Should handle KSYM_SYMBOL_LEN */

enum event_trigger_type {
        ETT_NONE                = (0),
        ETT_TRACE_ONOFF         = (1 << 0),
        ETT_SNAPSHOT            = (1 << 1),
        ETT_STACKTRACE          = (1 << 2),
        ETT_EVENT_ENABLE        = (1 << 3),
        ETT_EVENT_HIST          = (1 << 4),
        ETT_HIST_ENABLE         = (1 << 5),
};

extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
                                                   void *rec);
extern void event_triggers_post_call(struct trace_event_file *file,
                                     enum event_trigger_type tt,
                                     void *rec);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);

/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
        unsigned long eflags = file->flags;

        if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
                if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
                        event_triggers_call(file, NULL);
                if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
                        return true;
                if (eflags & EVENT_FILE_FL_PID_FILTER)
                        return trace_event_ignore_this_pid(file);
        }
        return false;
}

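/*
 * Sketch of the typical call site (illustrative; roughly what is
 * generated at the top of an event probe before any fields are
 * assembled):
 *
 *	if (trace_trigger_soft_disabled(trace_file))
 *		return;
 */
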
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
#else
static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
        return 1;
}
#endif

enum {
        FILTER_OTHER = 0,
        FILTER_STATIC_STRING,
        FILTER_DYN_STRING,
        FILTER_PTR_STRING,
        FILTER_TRACE_FN,
        FILTER_COMM,
        FILTER_CPU,
};

extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
                              const char *name, int offset, int size,
                              int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);

#define is_signed_type(type)    (((type)(-1)) < (type)1)

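/*
 * Worked example (informative): is_signed_type(int) compares (int)-1 < 1,
 * which is true; is_signed_type(unsigned int) compares UINT_MAX < 1,
 * which is false.
 */
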
int trace_set_clr_event(const char *system, const char *event, int set);

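/*
 * Minimal usage sketch (system/event names illustrative): enable a
 * single event from kernel code, equivalent to writing to that event's
 * "enable" file:
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 */
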
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to assign fmt to the static variable when fmt is not a
 * constant, even with the outer if statement optimized out.
 */
#define event_trace_printk(ip, fmt, args...)                            \
do {                                                                    \
        __trace_printk_check_format(fmt, ##args);                       \
        tracing_record_cmdline(current);                                \
        if (__builtin_constant_p(fmt)) {                                \
                static const char *trace_printk_fmt                     \
                __attribute__((section("__trace_printk_fmt"))) =        \
                        __builtin_constant_p(fmt) ? fmt : NULL;         \
                                                                        \
                __trace_bprintk(ip, trace_printk_fmt, ##args);          \
        } else                                                          \
                __trace_printk(ip, fmt, ##args);                        \
} while (0)

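/*
 * Illustrative call (format string and val hypothetical); a constant
 * format takes the bprintk path, with trace_printk_fmt placed in the
 * __trace_printk_fmt section:
 *
 *	event_trace_printk(_THIS_IP_, "reached %s with val=%d\n",
 *			   __func__, val);
 */
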
#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);

void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
                               struct trace_event_call *call, u64 count,
                               struct pt_regs *regs, struct hlist_head *head,
                               struct task_struct *task);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
                      u64 count, struct pt_regs *regs, void *head,
                      struct task_struct *task)
{
        perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}
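
/*
 * Sketch of the perf-side probe pattern built from the helpers above
 * (roughly what the generated perf_trace_##call() functions do; sizes
 * and names illustrative):
 *
 *	entry = perf_trace_buf_alloc(size, &regs, &rctx);
 *	if (!entry)
 *		return;
 *	... fill in entry ...
 *	perf_trace_buf_submit(entry, size, rctx, event_type, 1, regs,
 *			      head, NULL);
 */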
#endif

#endif /* _LINUX_TRACE_EVENT_H */