#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_CONT,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_FN_RET,
	TRACE_USER_STACK,
	TRACE_BTS,
	TRACE_POWER,

	__TRACE_LAST_TYPE
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		cpu;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			tgid;
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};

/* Function return entry */
struct ftrace_ret_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
	unsigned long long	calltime;
	unsigned long long	rettime;
	unsigned long		overrun;
};
extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

struct userstack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	char			buf[];
};

#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot_call {
	struct trace_entry	ent;
	struct boot_trace_call	boot_call;
};

struct trace_boot_ret {
	struct trace_entry	ent;
	struct boot_trace_ret	boot_ret;
};

#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
	struct trace_entry	ent;
	unsigned		line;
	char			func[TRACE_FUNC_SIZE+1];
	char			file[TRACE_FILE_SIZE+1];
	char			correct;
};

struct bts_entry {
	struct trace_entry	ent;
	unsigned long		from;
	unsigned long		to;
};

struct trace_power {
	struct trace_entry	ent;
	struct power_trace	state_data;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	   - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - reschedule is requested
 *  HARDIRQ	   - inside an interrupt handler
 *  SOFTIRQ	   - inside a softirq handler
 *  CONT	   - multiple entries hold the trace item
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_CONT			= 0x20,
};

#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
		IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);	\
		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER);	\
		__ftrace_bad_type();					\
	} while (0)
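
/*
 * Illustrative sketch (not part of the original header): one way a
 * print_line callback might use trace_assign_type().  The function
 * name example_print_line and the exact output format are assumptions
 * made for illustration only.
 */
#if 0
static enum print_line_t example_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct ftrace_entry *field;

	if (entry->type != TRACE_FN)
		return TRACE_TYPE_UNHANDLED;

	/* Verify that "entry" really is a struct ftrace_entry. */
	trace_assign_type(field, entry);

	/* trace_seq_printf() returns 0 when the seq buffer is full. */
	if (!trace_seq_printf(&iter->seq, "%lx <- %lx\n",
			      field->ip, field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
#endif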

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
};

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name;	/* Will appear on the trace_options file */
	u32		bit;	/* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
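
/*
 * Illustrative sketch (not part of the original header): defining a
 * boolean option with TRACER_OPT().  The option name "myopt", the bit
 * value and the variable names are assumptions for illustration only.
 */
#if 0
#define TRACE_MYOPT	0x1

static struct tracer_opt my_tracer_opts[] = {
	/* "myopt" will show up in the trace_options file */
	{ TRACER_OPT(myopt, TRACE_MYOPT) },
	{ } /* terminating entry */
};

static struct tracer_flags my_tracer_flags = {
	/* the tracer has to set the initial value of val */
	.val	= TRACE_MYOPT,
	.opts	= my_tracer_opts,
};
#endif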

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char		*name;
	/* Your tracer should raise a warning if init fails */
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer		*next;
	int			print_max;
	struct tracer_flags	*flags;
};

struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
	unsigned int		readpos;
};

/*
 * Trace iterator - used by printout routines who present trace
 * results to users and which routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;
};

int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void
trace_function_return(struct ftrace_retfunc *trace);

void trace_bts(struct trace_array *tr,
	       unsigned long from,
	       unsigned long to);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
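
/*
 * Illustrative sketch (not part of the original header): the usual
 * pattern for hooking a tracer into the core with register_tracer().
 * The tracer name "example" and its callbacks are assumptions for
 * illustration only.
 */
#if 0
static int example_trace_init(struct trace_array *tr)
{
	tracing_start_cmdline_record();
	return 0;
}

static void example_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_trace_init,
	.reset	= example_trace_reset,
};

static __init int init_example_tracer(void)
{
	return register_tracer(&example_tracer);
}
device_initcall(init_example_tracer);
#endif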

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace()		do { } while (0)
# define tracing_stop_function_trace()		do { } while (0)
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
				 struct trace_iterator *iter);

extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
		 unsigned long sym_flags);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_RET_TRACER
extern enum print_line_t
print_return_function(struct trace_iterator *iter);
#else
static inline enum print_line_t
print_return_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
	TRACE_ITER_BRANCH		= 0x1000,
	TRACE_ITER_ANNOTATE		= 0x2000,
	TRACE_ITER_USERSTACKTRACE	= 0x4000,
	TRACE_ITER_SYM_USEROBJ		= 0x8000
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag, if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable call saved the state of preemption.
 * If resched is set, then we were either inside an atomic or
 * are inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
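
/*
 * Illustrative sketch (not part of the original header): the pattern
 * the two helpers above are meant for.  A tracing callback saves the
 * value returned by ftrace_preempt_disable() and hands it back to
 * ftrace_preempt_enable() when it is done.  The callback name is an
 * assumption for illustration only.
 */
#if 0
static void example_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int resched;

	/* scheduler-safe: remembers whether a reschedule was pending */
	resched = ftrace_preempt_disable();

	/* ... record the event with preemption disabled ... */

	ftrace_preempt_enable(resched);
}
#endif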

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

#endif /* _LINUX_KERNEL_TRACE_H */