/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;
/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;
/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;
/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}
/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);
/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);
cpumask_var_t __read_mostly tracing_buffer_mask;
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump the buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */
enum ftrace_dump_mode ftrace_dump_on_oops;
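/*
 * For example, booting with "ftrace_dump_on_oops=orig_cpu" (or writing 2
 * to /proc/sys/kernel/ftrace_dump_on_oops at run time) dumps only the
 * buffer of the CPU that triggered the oops, while "ftrace_dump_on_oops"
 * alone (or writing 1) dumps the buffers of all CPUs.
 */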
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);
static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);
static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);
static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);
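/*
 * The boot options above can be combined on the kernel command line,
 * e.g. (illustrative):
 *
 *	ftrace=function_graph trace_options=sym-offset trace_clock=global \
 *	alloc_snapshot
 *
 * which selects the tracer, its options and the trace clock, and
 * pre-allocates the snapshot buffer before user space comes up.
 */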
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);
282 int trace_array_get(struct trace_array
*this_tr
)
284 struct trace_array
*tr
;
287 mutex_lock(&trace_types_lock
);
288 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
295 mutex_unlock(&trace_types_lock
);
300 static void __trace_array_put(struct trace_array
*this_tr
)
302 WARN_ON(!this_tr
->ref
);
306 void trace_array_put(struct trace_array
*this_tr
)
308 mutex_lock(&trace_types_lock
);
309 __trace_array_put(this_tr
);
310 mutex_unlock(&trace_types_lock
);
313 int filter_check_discard(struct trace_event_file
*file
, void *rec
,
314 struct ring_buffer
*buffer
,
315 struct ring_buffer_event
*event
)
317 if (unlikely(file
->flags
& EVENT_FILE_FL_FILTERED
) &&
318 !filter_match_preds(file
->filter
, rec
)) {
319 ring_buffer_discard_commit(buffer
, event
);
325 EXPORT_SYMBOL_GPL(filter_check_discard
);
327 int call_filter_check_discard(struct trace_event_call
*call
, void *rec
,
328 struct ring_buffer
*buffer
,
329 struct ring_buffer_event
*event
)
331 if (unlikely(call
->flags
& TRACE_EVENT_FL_FILTERED
) &&
332 !filter_match_preds(call
->filter
, rec
)) {
333 ring_buffer_discard_commit(buffer
, event
);
339 EXPORT_SYMBOL_GPL(call_filter_check_discard
);
341 static cycle_t
buffer_ftrace_now(struct trace_buffer
*buf
, int cpu
)
345 /* Early boot up does not have a buffer yet */
347 return trace_clock_local();
349 ts
= ring_buffer_time_stamp(buf
->buffer
, cpu
);
350 ring_buffer_normalize_time_stamp(buf
->buffer
, cpu
, &ts
);
355 cycle_t
ftrace_now(int cpu
)
357 return buffer_ftrace_now(&global_trace
.trace_buffer
, cpu
);
/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
369 int tracing_is_enabled(void)
372 * For quick access (irqsoff uses this in fast path), just
373 * return the mirror variable of the state of the ring buffer.
374 * It's a little racy, but we don't really care.
377 return !global_trace
.buffer_disabled
;
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is only low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */
425 static DECLARE_RWSEM(all_cpu_access_lock
);
426 static DEFINE_PER_CPU(struct mutex
, cpu_access_lock
);
428 static inline void trace_access_lock(int cpu
)
430 if (cpu
== RING_BUFFER_ALL_CPUS
) {
431 /* gain it for accessing the whole ring buffer. */
432 down_write(&all_cpu_access_lock
);
434 /* gain it for accessing a cpu ring buffer. */
436 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
437 down_read(&all_cpu_access_lock
);
439 /* Secondly block other access to this @cpu ring buffer. */
440 mutex_lock(&per_cpu(cpu_access_lock
, cpu
));
444 static inline void trace_access_unlock(int cpu
)
446 if (cpu
== RING_BUFFER_ALL_CPUS
) {
447 up_write(&all_cpu_access_lock
);
449 mutex_unlock(&per_cpu(cpu_access_lock
, cpu
));
450 up_read(&all_cpu_access_lock
);
454 static inline void trace_access_lock_init(void)
458 for_each_possible_cpu(cpu
)
459 mutex_init(&per_cpu(cpu_access_lock
, cpu
));
464 static DEFINE_MUTEX(access_lock
);
466 static inline void trace_access_lock(int cpu
)
469 mutex_lock(&access_lock
);
472 static inline void trace_access_unlock(int cpu
)
475 mutex_unlock(&access_lock
);
478 static inline void trace_access_lock_init(void)
484 #ifdef CONFIG_STACKTRACE
485 static void __ftrace_trace_stack(struct ring_buffer
*buffer
,
487 int skip
, int pc
, struct pt_regs
*regs
);
488 static inline void ftrace_trace_stack(struct trace_array
*tr
,
489 struct ring_buffer
*buffer
,
491 int skip
, int pc
, struct pt_regs
*regs
);
494 static inline void __ftrace_trace_stack(struct ring_buffer
*buffer
,
496 int skip
, int pc
, struct pt_regs
*regs
)
499 static inline void ftrace_trace_stack(struct trace_array
*tr
,
500 struct ring_buffer
*buffer
,
502 int skip
, int pc
, struct pt_regs
*regs
)
508 static void tracer_tracing_on(struct trace_array
*tr
)
510 if (tr
->trace_buffer
.buffer
)
511 ring_buffer_record_on(tr
->trace_buffer
.buffer
);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
520 tr
->buffer_disabled
= 0;
521 /* Make the flag seen by readers */
526 * tracing_on - enable tracing buffers
528 * This function enables tracing buffers that may have been
529 * disabled with tracing_off.
531 void tracing_on(void)
533 tracer_tracing_on(&global_trace
);
535 EXPORT_SYMBOL_GPL(tracing_on
);
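/*
 * A typical use is to bracket a suspect code path from a driver or test
 * module, e.g. (sketch):
 *
 *	tracing_on();
 *	do_something_being_debugged();	// hypothetical function
 *	if (something_went_wrong())	// hypothetical check
 *		tracing_off();		// freeze the buffer for inspection
 */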
538 * __trace_puts - write a constant string into the trace buffer.
539 * @ip: The address of the caller
540 * @str: The constant string to write
541 * @size: The size of the string.
543 int __trace_puts(unsigned long ip
, const char *str
, int size
)
545 struct ring_buffer_event
*event
;
546 struct ring_buffer
*buffer
;
547 struct print_entry
*entry
;
548 unsigned long irq_flags
;
552 if (!(global_trace
.trace_flags
& TRACE_ITER_PRINTK
))
555 pc
= preempt_count();
557 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
560 alloc
= sizeof(*entry
) + size
+ 2; /* possible \n added */
562 local_save_flags(irq_flags
);
563 buffer
= global_trace
.trace_buffer
.buffer
;
564 event
= trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, alloc
,
569 entry
= ring_buffer_event_data(event
);
572 memcpy(&entry
->buf
, str
, size
);
574 /* Add a newline if necessary */
575 if (entry
->buf
[size
- 1] != '\n') {
576 entry
->buf
[size
] = '\n';
577 entry
->buf
[size
+ 1] = '\0';
579 entry
->buf
[size
] = '\0';
581 __buffer_unlock_commit(buffer
, event
);
582 ftrace_trace_stack(&global_trace
, buffer
, irq_flags
, 4, pc
, NULL
);
586 EXPORT_SYMBOL_GPL(__trace_puts
);
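/*
 * This is normally reached through the trace_puts() macro, which supplies
 * the caller's ip and the string length (literal strings may instead be
 * routed to __trace_bputs() below), e.g. (sketch):
 *
 *	trace_puts("reached the slow path\n");
 */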
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer
 */
593 int __trace_bputs(unsigned long ip
, const char *str
)
595 struct ring_buffer_event
*event
;
596 struct ring_buffer
*buffer
;
597 struct bputs_entry
*entry
;
598 unsigned long irq_flags
;
599 int size
= sizeof(struct bputs_entry
);
602 if (!(global_trace
.trace_flags
& TRACE_ITER_PRINTK
))
605 pc
= preempt_count();
607 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
610 local_save_flags(irq_flags
);
611 buffer
= global_trace
.trace_buffer
.buffer
;
612 event
= trace_buffer_lock_reserve(buffer
, TRACE_BPUTS
, size
,
617 entry
= ring_buffer_event_data(event
);
621 __buffer_unlock_commit(buffer
, event
);
622 ftrace_trace_stack(&global_trace
, buffer
, irq_flags
, 4, pc
, NULL
);
626 EXPORT_SYMBOL_GPL(__trace_bputs
);
628 #ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing,
 * basically making a permanent snapshot.
 */
643 void tracing_snapshot(void)
645 struct trace_array
*tr
= &global_trace
;
646 struct tracer
*tracer
= tr
->current_trace
;
650 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
651 internal_trace_puts("*** snapshot is being ignored ***\n");
655 if (!tr
->allocated_snapshot
) {
656 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
657 internal_trace_puts("*** stopping trace here! ***\n");
662 /* Note, snapshot can not be used when the tracer uses it */
663 if (tracer
->use_max_tr
) {
664 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
665 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
669 local_irq_save(flags
);
670 update_max_tr(tr
, current
, smp_processor_id());
671 local_irq_restore(flags
);
673 EXPORT_SYMBOL_GPL(tracing_snapshot
);
675 static int resize_buffer_duplicate_size(struct trace_buffer
*trace_buf
,
676 struct trace_buffer
*size_buf
, int cpu_id
);
677 static void set_buffer_entries(struct trace_buffer
*buf
, unsigned long val
);
679 static int alloc_snapshot(struct trace_array
*tr
)
683 if (!tr
->allocated_snapshot
) {
685 /* allocate spare buffer */
686 ret
= resize_buffer_duplicate_size(&tr
->max_buffer
,
687 &tr
->trace_buffer
, RING_BUFFER_ALL_CPUS
);
691 tr
->allocated_snapshot
= true;
697 static void free_snapshot(struct trace_array
*tr
)
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
704 ring_buffer_resize(tr
->max_buffer
.buffer
, 1, RING_BUFFER_ALL_CPUS
);
705 set_buffer_entries(&tr
->max_buffer
, 1);
706 tracing_reset_online_cpus(&tr
->max_buffer
);
707 tr
->allocated_snapshot
= false;
711 * tracing_alloc_snapshot - allocate snapshot buffer.
713 * This only allocates the snapshot buffer if it isn't already
714 * allocated - it doesn't also take a snapshot.
716 * This is meant to be used in cases where the snapshot buffer needs
717 * to be set up for events that can't sleep but need to be able to
718 * trigger a snapshot.
720 int tracing_alloc_snapshot(void)
722 struct trace_array
*tr
= &global_trace
;
725 ret
= alloc_snapshot(tr
);
730 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot
);
/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
743 void tracing_snapshot_alloc(void)
747 ret
= tracing_alloc_snapshot();
753 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc
);
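/*
 * Typical use from kernel code (sketch): allocate the snapshot buffer once
 * in a context that may sleep, then trigger snapshots from the hot path:
 *
 *	tracing_snapshot_alloc();		// at init time
 *	...
 *	if (latency > threshold)		// hypothetical condition
 *		tracing_snapshot();		// swap live and snapshot buffers
 */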
755 void tracing_snapshot(void)
757 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
759 EXPORT_SYMBOL_GPL(tracing_snapshot
);
760 int tracing_alloc_snapshot(void)
762 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
765 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot
);
766 void tracing_snapshot_alloc(void)
771 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc
);
772 #endif /* CONFIG_TRACER_SNAPSHOT */
774 static void tracer_tracing_off(struct trace_array
*tr
)
776 if (tr
->trace_buffer
.buffer
)
777 ring_buffer_record_off(tr
->trace_buffer
.buffer
);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
786 tr
->buffer_disabled
= 1;
787 /* Make the flag seen by readers */
792 * tracing_off - turn off tracing buffers
794 * This function stops the tracing buffers from recording data.
795 * It does not disable any overhead the tracers themselves may
796 * be causing. This function simply causes all recording to
797 * the ring buffers to fail.
799 void tracing_off(void)
801 tracer_tracing_off(&global_trace
);
803 EXPORT_SYMBOL_GPL(tracing_off
);
805 void disable_trace_on_warning(void)
807 if (__disable_trace_on_warning
)
/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
817 static int tracer_tracing_is_on(struct trace_array
*tr
)
819 if (tr
->trace_buffer
.buffer
)
820 return ring_buffer_record_is_on(tr
->trace_buffer
.buffer
);
821 return !tr
->buffer_disabled
;
825 * tracing_is_on - show state of ring buffers enabled
827 int tracing_is_on(void)
829 return tracer_tracing_is_on(&global_trace
);
831 EXPORT_SYMBOL_GPL(tracing_is_on
);
833 static int __init
set_buf_size(char *str
)
835 unsigned long buf_size
;
839 buf_size
= memparse(str
, &str
);
840 /* nr_entries can not be zero */
843 trace_buf_size
= buf_size
;
846 __setup("trace_buf_size=", set_buf_size
);
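/*
 * trace_buf_size= accepts the usual memparse() suffixes, so for example
 * booting with "trace_buf_size=1M" gives each CPU a one megabyte buffer.
 */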
848 static int __init
set_tracing_thresh(char *str
)
850 unsigned long threshold
;
855 ret
= kstrtoul(str
, 0, &threshold
);
858 tracing_thresh
= threshold
* 1000;
861 __setup("tracing_thresh=", set_tracing_thresh
);
863 unsigned long nsecs_to_usecs(unsigned long nsecs
)
/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
878 static const char *trace_options
[] = {
886 int in_ns
; /* is this clock in nanoseconds? */
888 { trace_clock_local
, "local", 1 },
889 { trace_clock_global
, "global", 1 },
890 { trace_clock_counter
, "counter", 0 },
891 { trace_clock_jiffies
, "uptime", 0 },
892 { trace_clock
, "perf", 1 },
893 { ktime_get_mono_fast_ns
, "mono", 1 },
894 { ktime_get_raw_fast_ns
, "mono_raw", 1 },
899 * trace_parser_get_init - gets the buffer for trace parser
901 int trace_parser_get_init(struct trace_parser
*parser
, int size
)
903 memset(parser
, 0, sizeof(*parser
));
905 parser
->buffer
= kmalloc(size
, GFP_KERNEL
);
914 * trace_parser_put - frees the buffer for trace parser
916 void trace_parser_put(struct trace_parser
*parser
)
918 kfree(parser
->buffer
);
922 * trace_get_user - reads the user input string separated by space
923 * (matched by isspace(ch))
925 * For each string found the 'struct trace_parser' is updated,
926 * and the function returns.
928 * Returns number of bytes read.
930 * See kernel/trace/trace.h for 'struct trace_parser' details.
932 int trace_get_user(struct trace_parser
*parser
, const char __user
*ubuf
,
933 size_t cnt
, loff_t
*ppos
)
940 trace_parser_clear(parser
);
942 ret
= get_user(ch
, ubuf
++);
950 * The parser is not finished with the last write,
951 * continue reading the user input without skipping spaces.
954 /* skip white space */
955 while (cnt
&& isspace(ch
)) {
956 ret
= get_user(ch
, ubuf
++);
963 /* only spaces were written */
973 /* read the non-space input */
974 while (cnt
&& !isspace(ch
)) {
975 if (parser
->idx
< parser
->size
- 1)
976 parser
->buffer
[parser
->idx
++] = ch
;
981 ret
= get_user(ch
, ubuf
++);
988 /* We either got finished input or we have to wait for another call. */
990 parser
->buffer
[parser
->idx
] = 0;
991 parser
->cont
= false;
992 } else if (parser
->idx
< parser
->size
- 1) {
994 parser
->buffer
[parser
->idx
++] = ch
;
1007 /* TODO add a seq_buf_to_buffer() */
1008 static ssize_t
trace_seq_to_buffer(struct trace_seq
*s
, void *buf
, size_t cnt
)
1012 if (trace_seq_used(s
) <= s
->seq
.readpos
)
1015 len
= trace_seq_used(s
) - s
->seq
.readpos
;
1018 memcpy(buf
, s
->buffer
+ s
->seq
.readpos
, cnt
);
1020 s
->seq
.readpos
+= cnt
;
1024 unsigned long __read_mostly tracing_thresh
;
1026 #ifdef CONFIG_TRACER_MAX_TRACE
1028 * Copy the new maximum trace into the separate maximum-trace
1029 * structure. (this way the maximum trace is permanently saved,
1030 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1033 __update_max_tr(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
1035 struct trace_buffer
*trace_buf
= &tr
->trace_buffer
;
1036 struct trace_buffer
*max_buf
= &tr
->max_buffer
;
1037 struct trace_array_cpu
*data
= per_cpu_ptr(trace_buf
->data
, cpu
);
1038 struct trace_array_cpu
*max_data
= per_cpu_ptr(max_buf
->data
, cpu
);
1041 max_buf
->time_start
= data
->preempt_timestamp
;
1043 max_data
->saved_latency
= tr
->max_latency
;
1044 max_data
->critical_start
= data
->critical_start
;
1045 max_data
->critical_end
= data
->critical_end
;
1047 memcpy(max_data
->comm
, tsk
->comm
, TASK_COMM_LEN
);
1048 max_data
->pid
= tsk
->pid
;
1050 * If tsk == current, then use current_uid(), as that does not use
1051 * RCU. The irq tracer can be called out of RCU scope.
1054 max_data
->uid
= current_uid();
1056 max_data
->uid
= task_uid(tsk
);
1058 max_data
->nice
= tsk
->static_prio
- 20 - MAX_RT_PRIO
;
1059 max_data
->policy
= tsk
->policy
;
1060 max_data
->rt_priority
= tsk
->rt_priority
;
1062 /* record this tasks comm */
1063 tracing_record_cmdline(tsk
);
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
1076 update_max_tr(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
1078 struct ring_buffer
*buf
;
1083 WARN_ON_ONCE(!irqs_disabled());
1085 if (!tr
->allocated_snapshot
) {
1086 /* Only the nop tracer should hit this when disabling */
1087 WARN_ON_ONCE(tr
->current_trace
!= &nop_trace
);
1091 arch_spin_lock(&tr
->max_lock
);
1093 buf
= tr
->trace_buffer
.buffer
;
1094 tr
->trace_buffer
.buffer
= tr
->max_buffer
.buffer
;
1095 tr
->max_buffer
.buffer
= buf
;
1097 __update_max_tr(tr
, tsk
, cpu
);
1098 arch_spin_unlock(&tr
->max_lock
);
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
1110 update_max_tr_single(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
1117 WARN_ON_ONCE(!irqs_disabled());
1118 if (!tr
->allocated_snapshot
) {
1119 /* Only the nop tracer should hit this when disabling */
1120 WARN_ON_ONCE(tr
->current_trace
!= &nop_trace
);
1124 arch_spin_lock(&tr
->max_lock
);
1126 ret
= ring_buffer_swap_cpu(tr
->max_buffer
.buffer
, tr
->trace_buffer
.buffer
, cpu
);
1128 if (ret
== -EBUSY
) {
1130 * We failed to swap the buffer due to a commit taking
1131 * place on this CPU. We fail to record, but we reset
1132 * the max trace buffer (no one writes directly to it)
1133 * and flag that it failed.
1135 trace_array_printk_buf(tr
->max_buffer
.buffer
, _THIS_IP_
,
1136 "Failed to swap buffers due to commit in progress\n");
1139 WARN_ON_ONCE(ret
&& ret
!= -EAGAIN
&& ret
!= -EBUSY
);
1141 __update_max_tr(tr
, tsk
, cpu
);
1142 arch_spin_unlock(&tr
->max_lock
);
1144 #endif /* CONFIG_TRACER_MAX_TRACE */
1146 static int wait_on_pipe(struct trace_iterator
*iter
, bool full
)
1148 /* Iterators are static, they should be filled or empty */
1149 if (trace_buffer_iter(iter
, iter
->cpu_file
))
1152 return ring_buffer_wait(iter
->trace_buffer
->buffer
, iter
->cpu_file
,
1156 #ifdef CONFIG_FTRACE_STARTUP_TEST
1157 static int run_tracer_selftest(struct tracer
*type
)
1159 struct trace_array
*tr
= &global_trace
;
1160 struct tracer
*saved_tracer
= tr
->current_trace
;
1163 if (!type
->selftest
|| tracing_selftest_disabled
)
1167 * Run a selftest on this tracer.
1168 * Here we reset the trace buffer, and set the current
1169 * tracer to be this tracer. The tracer can then run some
1170 * internal tracing to verify that everything is in order.
1171 * If we fail, we do not register this tracer.
1173 tracing_reset_online_cpus(&tr
->trace_buffer
);
1175 tr
->current_trace
= type
;
1177 #ifdef CONFIG_TRACER_MAX_TRACE
1178 if (type
->use_max_tr
) {
1179 /* If we expanded the buffers, make sure the max is expanded too */
1180 if (ring_buffer_expanded
)
1181 ring_buffer_resize(tr
->max_buffer
.buffer
, trace_buf_size
,
1182 RING_BUFFER_ALL_CPUS
);
1183 tr
->allocated_snapshot
= true;
1187 /* the test is responsible for initializing and enabling */
1188 pr_info("Testing tracer %s: ", type
->name
);
1189 ret
= type
->selftest(type
, tr
);
1190 /* the test is responsible for resetting too */
1191 tr
->current_trace
= saved_tracer
;
1193 printk(KERN_CONT
"FAILED!\n");
1194 /* Add the warning after printing 'FAILED' */
1198 /* Only reset on passing, to avoid touching corrupted buffers */
1199 tracing_reset_online_cpus(&tr
->trace_buffer
);
1201 #ifdef CONFIG_TRACER_MAX_TRACE
1202 if (type
->use_max_tr
) {
1203 tr
->allocated_snapshot
= false;
1205 /* Shrink the max buffer again */
1206 if (ring_buffer_expanded
)
1207 ring_buffer_resize(tr
->max_buffer
.buffer
, 1,
1208 RING_BUFFER_ALL_CPUS
);
1212 printk(KERN_CONT
"PASSED\n");
1216 static inline int run_tracer_selftest(struct tracer
*type
)
1220 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1222 static void add_tracer_options(struct trace_array
*tr
, struct tracer
*t
);
1224 static void __init
apply_trace_boot_options(void);
1227 * register_tracer - register a tracer with the ftrace system.
1228 * @type - the plugin for the tracer
1230 * Register a new plugin tracer.
1232 int __init
register_tracer(struct tracer
*type
)
1238 pr_info("Tracer must have a name\n");
1242 if (strlen(type
->name
) >= MAX_TRACER_SIZE
) {
1243 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE
);
1247 mutex_lock(&trace_types_lock
);
1249 tracing_selftest_running
= true;
1251 for (t
= trace_types
; t
; t
= t
->next
) {
1252 if (strcmp(type
->name
, t
->name
) == 0) {
1254 pr_info("Tracer %s already registered\n",
1261 if (!type
->set_flag
)
1262 type
->set_flag
= &dummy_set_flag
;
1264 type
->flags
= &dummy_tracer_flags
;
1266 if (!type
->flags
->opts
)
1267 type
->flags
->opts
= dummy_tracer_opt
;
1269 ret
= run_tracer_selftest(type
);
1273 type
->next
= trace_types
;
1275 add_tracer_options(&global_trace
, type
);
1278 tracing_selftest_running
= false;
1279 mutex_unlock(&trace_types_lock
);
1281 if (ret
|| !default_bootup_tracer
)
1284 if (strncmp(default_bootup_tracer
, type
->name
, MAX_TRACER_SIZE
))
1287 printk(KERN_INFO
"Starting tracer '%s'\n", type
->name
);
1288 /* Do we want this tracer to start on bootup? */
1289 tracing_set_tracer(&global_trace
, type
->name
);
1290 default_bootup_tracer
= NULL
;
1292 apply_trace_boot_options();
1294 /* disable other selftests, since this will break it. */
1295 tracing_selftest_disabled
= true;
1296 #ifdef CONFIG_FTRACE_STARTUP_TEST
1297 printk(KERN_INFO
"Disabling FTRACE selftests due to running tracer '%s'\n",
1305 void tracing_reset(struct trace_buffer
*buf
, int cpu
)
1307 struct ring_buffer
*buffer
= buf
->buffer
;
1312 ring_buffer_record_disable(buffer
);
1314 /* Make sure all commits have finished */
1315 synchronize_sched();
1316 ring_buffer_reset_cpu(buffer
, cpu
);
1318 ring_buffer_record_enable(buffer
);
1321 void tracing_reset_online_cpus(struct trace_buffer
*buf
)
1323 struct ring_buffer
*buffer
= buf
->buffer
;
1329 ring_buffer_record_disable(buffer
);
1331 /* Make sure all commits have finished */
1332 synchronize_sched();
1334 buf
->time_start
= buffer_ftrace_now(buf
, buf
->cpu
);
1336 for_each_online_cpu(cpu
)
1337 ring_buffer_reset_cpu(buffer
, cpu
);
1339 ring_buffer_record_enable(buffer
);
1342 /* Must have trace_types_lock held */
1343 void tracing_reset_all_online_cpus(void)
1345 struct trace_array
*tr
;
1347 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
1348 tracing_reset_online_cpus(&tr
->trace_buffer
);
1349 #ifdef CONFIG_TRACER_MAX_TRACE
1350 tracing_reset_online_cpus(&tr
->max_buffer
);
1355 #define SAVED_CMDLINES_DEFAULT 128
1356 #define NO_CMDLINE_MAP UINT_MAX
1357 static arch_spinlock_t trace_cmdline_lock
= __ARCH_SPIN_LOCK_UNLOCKED
;
1358 struct saved_cmdlines_buffer
{
1359 unsigned map_pid_to_cmdline
[PID_MAX_DEFAULT
+1];
1360 unsigned *map_cmdline_to_pid
;
1361 unsigned cmdline_num
;
1363 char *saved_cmdlines
;
1365 static struct saved_cmdlines_buffer
*savedcmd
;
1367 /* temporary disable recording */
1368 static atomic_t trace_record_cmdline_disabled __read_mostly
;
1370 static inline char *get_saved_cmdlines(int idx
)
1372 return &savedcmd
->saved_cmdlines
[idx
* TASK_COMM_LEN
];
1375 static inline void set_cmdline(int idx
, const char *cmdline
)
1377 memcpy(get_saved_cmdlines(idx
), cmdline
, TASK_COMM_LEN
);
1380 static int allocate_cmdlines_buffer(unsigned int val
,
1381 struct saved_cmdlines_buffer
*s
)
1383 s
->map_cmdline_to_pid
= kmalloc(val
* sizeof(*s
->map_cmdline_to_pid
),
1385 if (!s
->map_cmdline_to_pid
)
1388 s
->saved_cmdlines
= kmalloc(val
* TASK_COMM_LEN
, GFP_KERNEL
);
1389 if (!s
->saved_cmdlines
) {
1390 kfree(s
->map_cmdline_to_pid
);
1395 s
->cmdline_num
= val
;
1396 memset(&s
->map_pid_to_cmdline
, NO_CMDLINE_MAP
,
1397 sizeof(s
->map_pid_to_cmdline
));
1398 memset(s
->map_cmdline_to_pid
, NO_CMDLINE_MAP
,
1399 val
* sizeof(*s
->map_cmdline_to_pid
));
1404 static int trace_create_savedcmd(void)
1408 savedcmd
= kmalloc(sizeof(*savedcmd
), GFP_KERNEL
);
1412 ret
= allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT
, savedcmd
);
1422 int is_tracing_stopped(void)
1424 return global_trace
.stop_count
;
1428 * tracing_start - quick start of the tracer
1430 * If tracing is enabled but was stopped by tracing_stop,
1431 * this will start the tracer back up.
1433 void tracing_start(void)
1435 struct ring_buffer
*buffer
;
1436 unsigned long flags
;
1438 if (tracing_disabled
)
1441 raw_spin_lock_irqsave(&global_trace
.start_lock
, flags
);
1442 if (--global_trace
.stop_count
) {
1443 if (global_trace
.stop_count
< 0) {
1444 /* Someone screwed up their debugging */
1446 global_trace
.stop_count
= 0;
1451 /* Prevent the buffers from switching */
1452 arch_spin_lock(&global_trace
.max_lock
);
1454 buffer
= global_trace
.trace_buffer
.buffer
;
1456 ring_buffer_record_enable(buffer
);
1458 #ifdef CONFIG_TRACER_MAX_TRACE
1459 buffer
= global_trace
.max_buffer
.buffer
;
1461 ring_buffer_record_enable(buffer
);
1464 arch_spin_unlock(&global_trace
.max_lock
);
1467 raw_spin_unlock_irqrestore(&global_trace
.start_lock
, flags
);
1470 static void tracing_start_tr(struct trace_array
*tr
)
1472 struct ring_buffer
*buffer
;
1473 unsigned long flags
;
1475 if (tracing_disabled
)
1478 /* If global, we need to also start the max tracer */
1479 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
1480 return tracing_start();
1482 raw_spin_lock_irqsave(&tr
->start_lock
, flags
);
1484 if (--tr
->stop_count
) {
1485 if (tr
->stop_count
< 0) {
1486 /* Someone screwed up their debugging */
1493 buffer
= tr
->trace_buffer
.buffer
;
1495 ring_buffer_record_enable(buffer
);
1498 raw_spin_unlock_irqrestore(&tr
->start_lock
, flags
);
1502 * tracing_stop - quick stop of the tracer
1504 * Light weight way to stop tracing. Use in conjunction with
1507 void tracing_stop(void)
1509 struct ring_buffer
*buffer
;
1510 unsigned long flags
;
1512 raw_spin_lock_irqsave(&global_trace
.start_lock
, flags
);
1513 if (global_trace
.stop_count
++)
1516 /* Prevent the buffers from switching */
1517 arch_spin_lock(&global_trace
.max_lock
);
1519 buffer
= global_trace
.trace_buffer
.buffer
;
1521 ring_buffer_record_disable(buffer
);
1523 #ifdef CONFIG_TRACER_MAX_TRACE
1524 buffer
= global_trace
.max_buffer
.buffer
;
1526 ring_buffer_record_disable(buffer
);
1529 arch_spin_unlock(&global_trace
.max_lock
);
1532 raw_spin_unlock_irqrestore(&global_trace
.start_lock
, flags
);
1535 static void tracing_stop_tr(struct trace_array
*tr
)
1537 struct ring_buffer
*buffer
;
1538 unsigned long flags
;
1540 /* If global, we need to also stop the max tracer */
1541 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
1542 return tracing_stop();
1544 raw_spin_lock_irqsave(&tr
->start_lock
, flags
);
1545 if (tr
->stop_count
++)
1548 buffer
= tr
->trace_buffer
.buffer
;
1550 ring_buffer_record_disable(buffer
);
1553 raw_spin_unlock_irqrestore(&tr
->start_lock
, flags
);
1556 void trace_stop_cmdline_recording(void);
1558 static int trace_save_cmdline(struct task_struct
*tsk
)
1562 if (!tsk
->pid
|| unlikely(tsk
->pid
> PID_MAX_DEFAULT
))
1566 * It's not the end of the world if we don't get
1567 * the lock, but we also don't want to spin
1568 * nor do we want to disable interrupts,
1569 * so if we miss here, then better luck next time.
1571 if (!arch_spin_trylock(&trace_cmdline_lock
))
1574 idx
= savedcmd
->map_pid_to_cmdline
[tsk
->pid
];
1575 if (idx
== NO_CMDLINE_MAP
) {
1576 idx
= (savedcmd
->cmdline_idx
+ 1) % savedcmd
->cmdline_num
;
1579 * Check whether the cmdline buffer at idx has a pid
1580 * mapped. We are going to overwrite that entry so we
1581 * need to clear the map_pid_to_cmdline. Otherwise we
1582 * would read the new comm for the old pid.
1584 pid
= savedcmd
->map_cmdline_to_pid
[idx
];
1585 if (pid
!= NO_CMDLINE_MAP
)
1586 savedcmd
->map_pid_to_cmdline
[pid
] = NO_CMDLINE_MAP
;
1588 savedcmd
->map_cmdline_to_pid
[idx
] = tsk
->pid
;
1589 savedcmd
->map_pid_to_cmdline
[tsk
->pid
] = idx
;
1591 savedcmd
->cmdline_idx
= idx
;
1594 set_cmdline(idx
, tsk
->comm
);
1596 arch_spin_unlock(&trace_cmdline_lock
);
1601 static void __trace_find_cmdline(int pid
, char comm
[])
1606 strcpy(comm
, "<idle>");
1610 if (WARN_ON_ONCE(pid
< 0)) {
1611 strcpy(comm
, "<XXX>");
1615 if (pid
> PID_MAX_DEFAULT
) {
1616 strcpy(comm
, "<...>");
1620 map
= savedcmd
->map_pid_to_cmdline
[pid
];
1621 if (map
!= NO_CMDLINE_MAP
)
1622 strcpy(comm
, get_saved_cmdlines(map
));
1624 strcpy(comm
, "<...>");
1627 void trace_find_cmdline(int pid
, char comm
[])
1630 arch_spin_lock(&trace_cmdline_lock
);
1632 __trace_find_cmdline(pid
, comm
);
1634 arch_spin_unlock(&trace_cmdline_lock
);
1638 void tracing_record_cmdline(struct task_struct
*tsk
)
1640 if (atomic_read(&trace_record_cmdline_disabled
) || !tracing_is_on())
1643 if (!__this_cpu_read(trace_cmdline_save
))
1646 if (trace_save_cmdline(tsk
))
1647 __this_cpu_write(trace_cmdline_save
, false);
1651 tracing_generic_entry_update(struct trace_entry
*entry
, unsigned long flags
,
1654 struct task_struct
*tsk
= current
;
1656 entry
->preempt_count
= pc
& 0xff;
1657 entry
->pid
= (tsk
) ? tsk
->pid
: 0;
1659 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1660 (irqs_disabled_flags(flags
) ? TRACE_FLAG_IRQS_OFF
: 0) |
1662 TRACE_FLAG_IRQS_NOSUPPORT
|
1664 ((pc
& HARDIRQ_MASK
) ? TRACE_FLAG_HARDIRQ
: 0) |
1665 ((pc
& SOFTIRQ_MASK
) ? TRACE_FLAG_SOFTIRQ
: 0) |
1666 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED
: 0) |
1667 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED
: 0);
1669 EXPORT_SYMBOL_GPL(tracing_generic_entry_update
);
1671 struct ring_buffer_event
*
1672 trace_buffer_lock_reserve(struct ring_buffer
*buffer
,
1675 unsigned long flags
, int pc
)
1677 struct ring_buffer_event
*event
;
1679 event
= ring_buffer_lock_reserve(buffer
, len
);
1680 if (event
!= NULL
) {
1681 struct trace_entry
*ent
= ring_buffer_event_data(event
);
1683 tracing_generic_entry_update(ent
, flags
, pc
);
1691 __buffer_unlock_commit(struct ring_buffer
*buffer
, struct ring_buffer_event
*event
)
1693 __this_cpu_write(trace_cmdline_save
, true);
1694 ring_buffer_unlock_commit(buffer
, event
);
1697 void trace_buffer_unlock_commit(struct trace_array
*tr
,
1698 struct ring_buffer
*buffer
,
1699 struct ring_buffer_event
*event
,
1700 unsigned long flags
, int pc
)
1702 __buffer_unlock_commit(buffer
, event
);
1704 ftrace_trace_stack(tr
, buffer
, flags
, 6, pc
, NULL
);
1705 ftrace_trace_userstack(buffer
, flags
, pc
);
1707 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit
);
1709 static struct ring_buffer
*temp_buffer
;
1711 struct ring_buffer_event
*
1712 trace_event_buffer_lock_reserve(struct ring_buffer
**current_rb
,
1713 struct trace_event_file
*trace_file
,
1714 int type
, unsigned long len
,
1715 unsigned long flags
, int pc
)
1717 struct ring_buffer_event
*entry
;
1719 *current_rb
= trace_file
->tr
->trace_buffer
.buffer
;
1720 entry
= trace_buffer_lock_reserve(*current_rb
,
1721 type
, len
, flags
, pc
);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
1728 if (!entry
&& trace_file
->flags
& EVENT_FILE_FL_TRIGGER_COND
) {
1729 *current_rb
= temp_buffer
;
1730 entry
= trace_buffer_lock_reserve(*current_rb
,
1731 type
, len
, flags
, pc
);
1735 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve
);
1737 struct ring_buffer_event
*
1738 trace_current_buffer_lock_reserve(struct ring_buffer
**current_rb
,
1739 int type
, unsigned long len
,
1740 unsigned long flags
, int pc
)
1742 *current_rb
= global_trace
.trace_buffer
.buffer
;
1743 return trace_buffer_lock_reserve(*current_rb
,
1744 type
, len
, flags
, pc
);
1746 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve
);
1748 void trace_buffer_unlock_commit_regs(struct trace_array
*tr
,
1749 struct ring_buffer
*buffer
,
1750 struct ring_buffer_event
*event
,
1751 unsigned long flags
, int pc
,
1752 struct pt_regs
*regs
)
1754 __buffer_unlock_commit(buffer
, event
);
1756 ftrace_trace_stack(tr
, buffer
, flags
, 6, pc
, regs
);
1757 ftrace_trace_userstack(buffer
, flags
, pc
);
1759 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs
);
1761 void trace_current_buffer_discard_commit(struct ring_buffer
*buffer
,
1762 struct ring_buffer_event
*event
)
1764 ring_buffer_discard_commit(buffer
, event
);
1766 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit
);
1769 trace_function(struct trace_array
*tr
,
1770 unsigned long ip
, unsigned long parent_ip
, unsigned long flags
,
1773 struct trace_event_call
*call
= &event_function
;
1774 struct ring_buffer
*buffer
= tr
->trace_buffer
.buffer
;
1775 struct ring_buffer_event
*event
;
1776 struct ftrace_entry
*entry
;
1778 /* If we are reading the ring buffer, don't trace */
1779 if (unlikely(__this_cpu_read(ftrace_cpu_disabled
)))
1782 event
= trace_buffer_lock_reserve(buffer
, TRACE_FN
, sizeof(*entry
),
1786 entry
= ring_buffer_event_data(event
);
1788 entry
->parent_ip
= parent_ip
;
1790 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
1791 __buffer_unlock_commit(buffer
, event
);
1794 #ifdef CONFIG_STACKTRACE
1796 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1797 struct ftrace_stack
{
1798 unsigned long calls
[FTRACE_STACK_MAX_ENTRIES
];
1801 static DEFINE_PER_CPU(struct ftrace_stack
, ftrace_stack
);
1802 static DEFINE_PER_CPU(int, ftrace_stack_reserve
);
1804 static void __ftrace_trace_stack(struct ring_buffer
*buffer
,
1805 unsigned long flags
,
1806 int skip
, int pc
, struct pt_regs
*regs
)
1808 struct trace_event_call
*call
= &event_kernel_stack
;
1809 struct ring_buffer_event
*event
;
1810 struct stack_entry
*entry
;
1811 struct stack_trace trace
;
1813 int size
= FTRACE_STACK_ENTRIES
;
1815 trace
.nr_entries
= 0;
1819 * Since events can happen in NMIs there's no safe way to
1820 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1821 * or NMI comes in, it will just have to use the default
1822 * FTRACE_STACK_SIZE.
1824 preempt_disable_notrace();
1826 use_stack
= __this_cpu_inc_return(ftrace_stack_reserve
);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
1835 if (use_stack
== 1) {
1836 trace
.entries
= this_cpu_ptr(ftrace_stack
.calls
);
1837 trace
.max_entries
= FTRACE_STACK_MAX_ENTRIES
;
1840 save_stack_trace_regs(regs
, &trace
);
1842 save_stack_trace(&trace
);
1844 if (trace
.nr_entries
> size
)
1845 size
= trace
.nr_entries
;
1847 /* From now on, use_stack is a boolean */
1850 size
*= sizeof(unsigned long);
1852 event
= trace_buffer_lock_reserve(buffer
, TRACE_STACK
,
1853 sizeof(*entry
) + size
, flags
, pc
);
1856 entry
= ring_buffer_event_data(event
);
1858 memset(&entry
->caller
, 0, size
);
1861 memcpy(&entry
->caller
, trace
.entries
,
1862 trace
.nr_entries
* sizeof(unsigned long));
1864 trace
.max_entries
= FTRACE_STACK_ENTRIES
;
1865 trace
.entries
= entry
->caller
;
1867 save_stack_trace_regs(regs
, &trace
);
1869 save_stack_trace(&trace
);
1872 entry
->size
= trace
.nr_entries
;
1874 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
1875 __buffer_unlock_commit(buffer
, event
);
1878 /* Again, don't let gcc optimize things here */
1880 __this_cpu_dec(ftrace_stack_reserve
);
1881 preempt_enable_notrace();
1885 static inline void ftrace_trace_stack(struct trace_array
*tr
,
1886 struct ring_buffer
*buffer
,
1887 unsigned long flags
,
1888 int skip
, int pc
, struct pt_regs
*regs
)
1890 if (!(tr
->trace_flags
& TRACE_ITER_STACKTRACE
))
1893 __ftrace_trace_stack(buffer
, flags
, skip
, pc
, regs
);
1896 void __trace_stack(struct trace_array
*tr
, unsigned long flags
, int skip
,
1899 __ftrace_trace_stack(tr
->trace_buffer
.buffer
, flags
, skip
, pc
, NULL
);
1903 * trace_dump_stack - record a stack back trace in the trace buffer
1904 * @skip: Number of functions to skip (helper handlers)
1906 void trace_dump_stack(int skip
)
1908 unsigned long flags
;
1910 if (tracing_disabled
|| tracing_selftest_running
)
1913 local_save_flags(flags
);
	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
1920 __ftrace_trace_stack(global_trace
.trace_buffer
.buffer
,
1921 flags
, skip
, preempt_count(), NULL
);
1924 static DEFINE_PER_CPU(int, user_stack_count
);
1927 ftrace_trace_userstack(struct ring_buffer
*buffer
, unsigned long flags
, int pc
)
1929 struct trace_event_call
*call
= &event_user_stack
;
1930 struct ring_buffer_event
*event
;
1931 struct userstack_entry
*entry
;
1932 struct stack_trace trace
;
1934 if (!(global_trace
.trace_flags
& TRACE_ITER_USERSTACKTRACE
))
	/*
	 * NMIs can not handle page faults, even with fixups.
	 * Saving the user stack can (and often does) fault.
	 */
1941 if (unlikely(in_nmi()))
1945 * prevent recursion, since the user stack tracing may
1946 * trigger other kernel events.
1949 if (__this_cpu_read(user_stack_count
))
1952 __this_cpu_inc(user_stack_count
);
1954 event
= trace_buffer_lock_reserve(buffer
, TRACE_USER_STACK
,
1955 sizeof(*entry
), flags
, pc
);
1957 goto out_drop_count
;
1958 entry
= ring_buffer_event_data(event
);
1960 entry
->tgid
= current
->tgid
;
1961 memset(&entry
->caller
, 0, sizeof(entry
->caller
));
1963 trace
.nr_entries
= 0;
1964 trace
.max_entries
= FTRACE_STACK_ENTRIES
;
1966 trace
.entries
= entry
->caller
;
1968 save_stack_trace_user(&trace
);
1969 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
1970 __buffer_unlock_commit(buffer
, event
);
1973 __this_cpu_dec(user_stack_count
);
1979 static void __trace_userstack(struct trace_array
*tr
, unsigned long flags
)
1981 ftrace_trace_userstack(tr
, flags
, preempt_count());
1985 #endif /* CONFIG_STACKTRACE */
1987 /* created for use with alloc_percpu */
1988 struct trace_buffer_struct
{
1989 char buffer
[TRACE_BUF_SIZE
];
1992 static struct trace_buffer_struct
*trace_percpu_buffer
;
1993 static struct trace_buffer_struct
*trace_percpu_sirq_buffer
;
1994 static struct trace_buffer_struct
*trace_percpu_irq_buffer
;
1995 static struct trace_buffer_struct
*trace_percpu_nmi_buffer
;
/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
2004 static char *get_trace_buf(void)
2006 struct trace_buffer_struct
*percpu_buffer
;
2009 * If we have allocated per cpu buffers, then we do not
2010 * need to do any locking.
2013 percpu_buffer
= trace_percpu_nmi_buffer
;
2015 percpu_buffer
= trace_percpu_irq_buffer
;
2016 else if (in_softirq())
2017 percpu_buffer
= trace_percpu_sirq_buffer
;
2019 percpu_buffer
= trace_percpu_buffer
;
2024 return this_cpu_ptr(&percpu_buffer
->buffer
[0]);
2027 static int alloc_percpu_trace_buffer(void)
2029 struct trace_buffer_struct
*buffers
;
2030 struct trace_buffer_struct
*sirq_buffers
;
2031 struct trace_buffer_struct
*irq_buffers
;
2032 struct trace_buffer_struct
*nmi_buffers
;
2034 buffers
= alloc_percpu(struct trace_buffer_struct
);
2038 sirq_buffers
= alloc_percpu(struct trace_buffer_struct
);
2042 irq_buffers
= alloc_percpu(struct trace_buffer_struct
);
2046 nmi_buffers
= alloc_percpu(struct trace_buffer_struct
);
2050 trace_percpu_buffer
= buffers
;
2051 trace_percpu_sirq_buffer
= sirq_buffers
;
2052 trace_percpu_irq_buffer
= irq_buffers
;
2053 trace_percpu_nmi_buffer
= nmi_buffers
;
2058 free_percpu(irq_buffers
);
2060 free_percpu(sirq_buffers
);
2062 free_percpu(buffers
);
2064 WARN(1, "Could not allocate percpu trace_printk buffer");
2068 static int buffers_allocated
;
2070 void trace_printk_init_buffers(void)
2072 if (buffers_allocated
)
2075 if (alloc_percpu_trace_buffer())
2078 /* trace_printk() is for debug use only. Don't use it in production. */
2081 pr_warning("**********************************************************\n");
2082 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2083 pr_warning("** **\n");
2084 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2085 pr_warning("** **\n");
2086 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2087 pr_warning("** unsafe for production use. **\n");
2088 pr_warning("** **\n");
2089 pr_warning("** If you see this message and you are not debugging **\n");
2090 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2091 pr_warning("** **\n");
2092 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2093 pr_warning("**********************************************************\n");
2095 /* Expand the buffers to set size */
2096 tracing_update_buffers();
2098 buffers_allocated
= 1;
2101 * trace_printk_init_buffers() can be called by modules.
2102 * If that happens, then we need to start cmdline recording
2103 * directly here. If the global_trace.buffer is already
2104 * allocated here, then this was called by module code.
2106 if (global_trace
.trace_buffer
.buffer
)
2107 tracing_start_cmdline_record();
2110 void trace_printk_start_comm(void)
2112 /* Start tracing comms if trace printk is set */
2113 if (!buffers_allocated
)
2115 tracing_start_cmdline_record();
2118 static void trace_printk_start_stop_comm(int enabled
)
2120 if (!buffers_allocated
)
2124 tracing_start_cmdline_record();
2126 tracing_stop_cmdline_record();
2130 * trace_vbprintk - write binary msg to tracing buffer
2133 int trace_vbprintk(unsigned long ip
, const char *fmt
, va_list args
)
2135 struct trace_event_call
*call
= &event_bprint
;
2136 struct ring_buffer_event
*event
;
2137 struct ring_buffer
*buffer
;
2138 struct trace_array
*tr
= &global_trace
;
2139 struct bprint_entry
*entry
;
2140 unsigned long flags
;
2142 int len
= 0, size
, pc
;
2144 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
2147 /* Don't pollute graph traces with trace_vprintk internals */
2148 pause_graph_tracing();
2150 pc
= preempt_count();
2151 preempt_disable_notrace();
2153 tbuffer
= get_trace_buf();
2159 len
= vbin_printf((u32
*)tbuffer
, TRACE_BUF_SIZE
/sizeof(int), fmt
, args
);
2161 if (len
> TRACE_BUF_SIZE
/sizeof(int) || len
< 0)
2164 local_save_flags(flags
);
2165 size
= sizeof(*entry
) + sizeof(u32
) * len
;
2166 buffer
= tr
->trace_buffer
.buffer
;
2167 event
= trace_buffer_lock_reserve(buffer
, TRACE_BPRINT
, size
,
2171 entry
= ring_buffer_event_data(event
);
2175 memcpy(entry
->buf
, tbuffer
, sizeof(u32
) * len
);
2176 if (!call_filter_check_discard(call
, entry
, buffer
, event
)) {
2177 __buffer_unlock_commit(buffer
, event
);
2178 ftrace_trace_stack(tr
, buffer
, flags
, 6, pc
, NULL
);
2182 preempt_enable_notrace();
2183 unpause_graph_tracing();
2187 EXPORT_SYMBOL_GPL(trace_vbprintk
);
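/*
 * trace_vbprintk() is the backend for trace_printk() with a constant
 * format; the arguments are stored in binary form and only decoded when
 * the buffer is read. A typical call site looks like (sketch):
 *
 *	trace_printk("processing cpu=%d count=%ld\n", cpu, count);
 */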
2190 __trace_array_vprintk(struct ring_buffer
*buffer
,
2191 unsigned long ip
, const char *fmt
, va_list args
)
2193 struct trace_event_call
*call
= &event_print
;
2194 struct ring_buffer_event
*event
;
2195 int len
= 0, size
, pc
;
2196 struct print_entry
*entry
;
2197 unsigned long flags
;
2200 if (tracing_disabled
|| tracing_selftest_running
)
2203 /* Don't pollute graph traces with trace_vprintk internals */
2204 pause_graph_tracing();
2206 pc
= preempt_count();
2207 preempt_disable_notrace();
2210 tbuffer
= get_trace_buf();
2216 len
= vscnprintf(tbuffer
, TRACE_BUF_SIZE
, fmt
, args
);
2218 local_save_flags(flags
);
2219 size
= sizeof(*entry
) + len
+ 1;
2220 event
= trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, size
,
2224 entry
= ring_buffer_event_data(event
);
2227 memcpy(&entry
->buf
, tbuffer
, len
+ 1);
2228 if (!call_filter_check_discard(call
, entry
, buffer
, event
)) {
2229 __buffer_unlock_commit(buffer
, event
);
2230 ftrace_trace_stack(&global_trace
, buffer
, flags
, 6, pc
, NULL
);
2233 preempt_enable_notrace();
2234 unpause_graph_tracing();
2239 int trace_array_vprintk(struct trace_array
*tr
,
2240 unsigned long ip
, const char *fmt
, va_list args
)
2242 return __trace_array_vprintk(tr
->trace_buffer
.buffer
, ip
, fmt
, args
);
2245 int trace_array_printk(struct trace_array
*tr
,
2246 unsigned long ip
, const char *fmt
, ...)
2251 if (!(global_trace
.trace_flags
& TRACE_ITER_PRINTK
))
2255 ret
= trace_array_vprintk(tr
, ip
, fmt
, ap
);
2260 int trace_array_printk_buf(struct ring_buffer
*buffer
,
2261 unsigned long ip
, const char *fmt
, ...)
2266 if (!(global_trace
.trace_flags
& TRACE_ITER_PRINTK
))
2270 ret
= __trace_array_vprintk(buffer
, ip
, fmt
, ap
);
2275 int trace_vprintk(unsigned long ip
, const char *fmt
, va_list args
)
2277 return trace_array_vprintk(&global_trace
, ip
, fmt
, args
);
2279 EXPORT_SYMBOL_GPL(trace_vprintk
);
2281 static void trace_iterator_increment(struct trace_iterator
*iter
)
2283 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, iter
->cpu
);
2287 ring_buffer_read(buf_iter
, NULL
);
2290 static struct trace_entry
*
2291 peek_next_entry(struct trace_iterator
*iter
, int cpu
, u64
*ts
,
2292 unsigned long *lost_events
)
2294 struct ring_buffer_event
*event
;
2295 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, cpu
);
2298 event
= ring_buffer_iter_peek(buf_iter
, ts
);
2300 event
= ring_buffer_peek(iter
->trace_buffer
->buffer
, cpu
, ts
,
2304 iter
->ent_size
= ring_buffer_event_length(event
);
2305 return ring_buffer_event_data(event
);
2311 static struct trace_entry
*
2312 __find_next_entry(struct trace_iterator
*iter
, int *ent_cpu
,
2313 unsigned long *missing_events
, u64
*ent_ts
)
2315 struct ring_buffer
*buffer
= iter
->trace_buffer
->buffer
;
2316 struct trace_entry
*ent
, *next
= NULL
;
2317 unsigned long lost_events
= 0, next_lost
= 0;
2318 int cpu_file
= iter
->cpu_file
;
2319 u64 next_ts
= 0, ts
;
	/*
	 * If we are in a per_cpu trace file, don't bother iterating over
	 * all CPUs; peek directly at that one.
	 */
2328 if (cpu_file
> RING_BUFFER_ALL_CPUS
) {
2329 if (ring_buffer_empty_cpu(buffer
, cpu_file
))
2331 ent
= peek_next_entry(iter
, cpu_file
, ent_ts
, missing_events
);
2333 *ent_cpu
= cpu_file
;
2338 for_each_tracing_cpu(cpu
) {
2340 if (ring_buffer_empty_cpu(buffer
, cpu
))
2343 ent
= peek_next_entry(iter
, cpu
, &ts
, &lost_events
);
2346 * Pick the entry with the smallest timestamp:
2348 if (ent
&& (!next
|| ts
< next_ts
)) {
2352 next_lost
= lost_events
;
2353 next_size
= iter
->ent_size
;
2357 iter
->ent_size
= next_size
;
2360 *ent_cpu
= next_cpu
;
2366 *missing_events
= next_lost
;
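
/*
 * Example (illustrative, timestamps are made up): if the next pending
 * entries on CPU0, CPU1 and CPU2 carry timestamps 105, 98 and 120,
 * __find_next_entry() hands back the CPU1 entry (ts == 98) first, so the
 * reader of "trace" sees one globally time-ordered stream even though
 * each CPU records into its own ring buffer.
 */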
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					   int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}
/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}
static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}
static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /           \n");
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}
static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
		    "#              | |       |          |         |\n");
}
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n"
		    "#                             / _----=> need-resched\n"
		    "#                            | / _---=> hardirq/softirq\n"
		    "#                            || / _--=> preempt-depth\n"
		    "#                            ||| /     delay\n"
		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
		    "#              | |       |   ||||       |         |\n");
}
void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (iter->started)
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}
int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
/*  Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}
void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}
#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 *  ret is 0 if seq_file write succeeded.
		 */
		iter->leftover = ret;
	}

	return 0;
}
/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
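
/*
 * Example (illustrative): trace_create_cpu_file() stores "cpu + 1" in
 * i_cdev, so the inode for per_cpu/cpu2/trace carries the value 3 and
 * decodes back to CPU 2 here, while a NULL i_cdev (the top-level files)
 * means "all CPUs".
 */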
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}
/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}
static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static inline bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}
static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}
static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};
/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = snprintf(mask_str, count, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}
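
/*
 * Example (illustrative, CPU count is made up): on an 8-CPU machine with
 * all CPUs traced, reading tracing_cpumask returns "ff\n"; after limiting
 * tracing to CPUs 0-3 the read returns "0f\n" ("%*pb" formats the mask as
 * a hex bitmap).
 */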
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}
static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tr->current_trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}
/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}
/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;
	size_t orig_len = strlen(option);

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}
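
/*
 * Example (illustrative): writing "noprint-parent" into trace_options is
 * parsed here as the "print-parent" flag with neg == 1, so the matching
 * TRACE_ITER_* bit is cleared via set_tracer_flag(); a string that is not
 * a core option falls through to the current tracer's own options.
 */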
static void __init apply_trace_boot_options(void)
{
	char *buf = trace_boot_options_buf;
	char *option;

	while (true) {
		option = strsep(&buf, ",");

		if (!option)
			break;

		if (*option)
			trace_set_options(&global_trace, option);

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	"  trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	"  current_tracer\t- function and latency tracers\n"
	"  available_tracers\t- list of configured tracers for current_tracer\n"
	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
	"  trace_clock\t\t-change the clock used to order events\n"
	"       local:   Per cpu clock but may not be synced across CPUs\n"
	"      global:   Synced across CPUs but slows tracing down.\n"
	"     counter:   Not a clock, but just an increment\n"
	"      uptime:   Jiffy counter from time of boot\n"
	"        perf:   Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	"     x86-tsc:   TSC cycle counter\n"
#endif
	"\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
	"  tracing_cpumask\t- Limit which CPUs to trace\n"
	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t  Remove sub-buffer with rmdir\n"
	"  trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t  Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t  option name\n"
	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n  available_filter_functions - list of functions that can be filtered on\n"
	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t    functions\n"
	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t     modules: Can select a group via module\n"
	"\t      Format: :mod:<module-name>\n"
	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t    triggers: a command to perform when function is hit\n"
	"\t      Format: <function>:<trigger>[:count]\n"
	"\t     trigger: traceon, traceoff\n"
	"\t\t      enable_event:<system>:<event>\n"
	"\t\t      disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t      stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t      snapshot\n"
#endif
	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t     The first one will disable tracing every time do_fault is hit\n"
	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t       The first time do trap is hit and it disables tracing, the\n"
	"\t       counter will decrement to 2. If tracing is already disabled,\n"
	"\t       the counter will not decrement. It only decrements when the\n"
	"\t       trigger did work\n"
	"\t     To remove trigger without count:\n"
	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
	"\t     To remove trigger with a count:\n"
	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t    modules: Can select a group via module command :mod:\n"
	"\t    Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t  snapshot buffer. Read the contents for more\n"
	"\t\t\t  information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	"  stack_trace\t\t- Shows the max stack trace when active\n"
	"  stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t  Write into this file to reset the max size (trigger a\n"
	"\t\t\t  new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t  traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
	"  events/\t\t- Directory containing all trace event subsystems:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t  events\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"  events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t  <event>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"      trigger\t\t- If set, a command to perform when event is hit\n"
	"\t    Format: <trigger>[:count][if <filter>]\n"
	"\t   trigger: traceon, traceoff\n"
	"\t            enable_event:<system>:<event>\n"
	"\t            disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t    stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t    snapshot\n"
#endif
	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t                  events/block/block_unplug/trigger\n"
	"\t   The first disables tracing every time block_unplug is hit.\n"
	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t   Like function triggers, the counter is only decremented if it\n"
	"\t    enabled or disabled tracing.\n"
	"\t   To remove a trigger without a count:\n"
	"\t     echo '!<trigger> > <system>/<event>/trigger\n"
	"\t   To remove a trigger with a count:\n"
	"\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
	"\t   Filters can be ignored when removing a trigger.\n"
;
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}
static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry or less than PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};
#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static union trace_enum_map_item *
update_enum_map(union trace_enum_map_item *ptr)
{
	if (!ptr->map.enum_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_enum_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	ptr = update_enum_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;

	(*pos)++;

	ptr = update_enum_map(ptr);

	return ptr;
}
static void *enum_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_enum_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_enum_mutex);

	v = trace_enum_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = enum_map_next(m, v, &l);
	}

	return v;
}

static void enum_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_enum_mutex);
}

static int enum_map_show(struct seq_file *m, void *v)
{
	union trace_enum_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.enum_string, ptr->map.enum_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_enum_map_seq_ops = {
	.start		= enum_map_start,
	.next		= enum_map_next,
	.stop		= enum_map_stop,
	.show		= enum_map_show,
};

static int tracing_enum_map_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_enum_map_seq_ops);
}

static const struct file_operations tracing_enum_map_fops = {
	.open		= tracing_enum_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static inline union trace_enum_map_item *
trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}
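
/*
 * Example (illustrative): a module contributing three enum mappings gets a
 * chunk of trace_enum_maps laid out as
 *	[head (mod, length=3)] [map0] [map1] [map2] [tail (next)]
 * so jumping "length + 1" slots past the head lands exactly on the tail,
 * whose next pointer links to the following module's chunk.
 */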
static void
trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
			   int len)
{
	struct trace_enum_map **stop;
	struct trace_enum_map **map;
	union trace_enum_map_item *map_array;
	union trace_enum_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_enum_maps contains the map plus a head and tail item,
	 * where the head holds the module and length of array, and the
	 * tail holds a pointer to the next list.
	 */
	map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
	if (!map_array) {
		pr_warning("Unable to allocate trace enum mapping\n");
		return;
	}

	mutex_lock(&trace_enum_mutex);

	if (!trace_enum_maps)
		trace_enum_maps = map_array;
	else {
		ptr = trace_enum_maps;
		for (;;) {
			ptr = trace_enum_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_enum_mutex);
}

static void trace_create_enum_file(struct dentry *d_tracer)
{
	trace_create_file("enum_map", 0444, d_tracer,
			  NULL, &tracing_enum_map_fops);
}

#else /* CONFIG_TRACE_ENUM_MAP_FILE */
static inline void trace_create_enum_file(struct dentry *d_tracer) { }
static inline void trace_insert_enum_map_file(struct module *mod,
			      struct trace_enum_map **start, int len) { }
#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
static void trace_insert_enum_map(struct module *mod,
				  struct trace_enum_map **start, int len)
{
	struct trace_enum_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_enum_update(map, len);

	trace_insert_enum_map_file(mod, start, len);
}
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}
#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @tr's buffer to the size of @size_tr's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snap shot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}
static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure, this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save on memory when the tracing is never used on a system with it
 * configured in. The ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}
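
/*
 * Example (illustrative): the first "echo function > current_tracer" (or
 * the first enabled trace event) ends up calling tracing_update_buffers(),
 * growing each per-cpu buffer from its tiny boot-time size to the default
 * trace_buf_size, unless the size was already overridden on the kernel
 * command line with trace_buf_size=.
 */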
struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronized_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}

	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}
static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}
static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->current_trace->ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}
static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}
/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, false);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}
/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/*
	 * Avoid more than one consumer on a single file descriptor
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
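/*
 * Usage sketch (illustrative, not part of the original source):
 * tracing_read_pipe() is a consuming reader, so entries returned here
 * are removed from the ring buffer.  With tracefs mounted at
 * /sys/kernel/tracing:
 *
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * blocks until new entries arrive (unless the descriptor is opened
 * with O_NONBLOCK), unlike the "trace" file which can be re-read
 * without consuming anything.
 */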
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}
static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
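/*
 * Illustrative usage (not part of the original source): the value
 * written to buffer_size_kb is taken in KB and applied to every
 * per-cpu buffer (or to a single CPU through
 * per_cpu/cpuX/buffer_size_kb), e.g.:
 *
 *	echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 * resizes each per-cpu ring buffer to 4 MB via
 * tracing_resize_ring_buffer().
 */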
static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}
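/*
 * Illustrative usage (not part of the original source): any write to
 * free_buffer is accepted; it is the final release of the file that
 * shrinks the ring buffer, so
 *
 *	echo > /sys/kernel/tracing/free_buffer
 *
 * frees the buffer pages, and first turns tracing off when the
 * TRACE_ITER_STOP_ON_FREE option is set.
 */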
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory,
	 * which, most likely it is, because it just referenced it.
	 * But there's no guarantee that it is. By using get_user_pages_fast()
	 * and kmap_atomic/kunmap_atomic() we can get access to the
	 * pages directly. We then write the data directly into the
	 * ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	for (i = nr_pages - 1; i >= 0; i--) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}
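/*
 * Illustrative usage (not part of the original source): userspace can
 * annotate a trace by writing plain text to trace_marker; the string
 * lands in the ring buffer as a TRACE_PRINT entry, e.g.:
 *
 *	echo "frame start" > /sys/kernel/tracing/trace_marker
 *
 * Writes longer than TRACE_BUF_SIZE are truncated, and a trailing
 * newline is appended before the commit if the data does not already
 * end with one.
 */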
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
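/*
 * Illustrative usage (not part of the original source): reading
 * trace_clock lists the available clocks with the active one in
 * brackets; writing a name switches clocks and resets the buffers,
 * since timestamps from different clocks are not comparable:
 *
 *	cat /sys/kernel/tracing/trace_clock
 *	echo global > /sys/kernel/tracing/trace_clock
 */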
struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
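/*
 * Illustrative usage (not part of the original source): the snapshot
 * file maps the values handled above to actions - "0" frees the
 * snapshot buffer, "1" allocates it (if needed) and swaps it with the
 * live buffer, and any other value clears it:
 *
 *	echo 1 > /sys/kernel/tracing/snapshot
 *	cat /sys/kernel/tracing/snapshot
 */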
static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */
static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, false);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			ret = -ENOMEM;
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			return ret;

		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			return -EAGAIN;

		ret = wait_on_pipe(iter, true);
		if (ret)
			return ret;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);

	return ret;
}
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
5864 tracing_stats_read(struct file
*filp
, char __user
*ubuf
,
5865 size_t count
, loff_t
*ppos
)
5867 struct inode
*inode
= file_inode(filp
);
5868 struct trace_array
*tr
= inode
->i_private
;
5869 struct trace_buffer
*trace_buf
= &tr
->trace_buffer
;
5870 int cpu
= tracing_get_cpu(inode
);
5871 struct trace_seq
*s
;
5873 unsigned long long t
;
5874 unsigned long usec_rem
;
5876 s
= kmalloc(sizeof(*s
), GFP_KERNEL
);
5882 cnt
= ring_buffer_entries_cpu(trace_buf
->buffer
, cpu
);
5883 trace_seq_printf(s
, "entries: %ld\n", cnt
);
5885 cnt
= ring_buffer_overrun_cpu(trace_buf
->buffer
, cpu
);
5886 trace_seq_printf(s
, "overrun: %ld\n", cnt
);
5888 cnt
= ring_buffer_commit_overrun_cpu(trace_buf
->buffer
, cpu
);
5889 trace_seq_printf(s
, "commit overrun: %ld\n", cnt
);
5891 cnt
= ring_buffer_bytes_cpu(trace_buf
->buffer
, cpu
);
5892 trace_seq_printf(s
, "bytes: %ld\n", cnt
);
5894 if (trace_clocks
[tr
->clock_id
].in_ns
) {
5895 /* local or global for trace_clock */
5896 t
= ns2usecs(ring_buffer_oldest_event_ts(trace_buf
->buffer
, cpu
));
5897 usec_rem
= do_div(t
, USEC_PER_SEC
);
5898 trace_seq_printf(s
, "oldest event ts: %5llu.%06lu\n",
5901 t
= ns2usecs(ring_buffer_time_stamp(trace_buf
->buffer
, cpu
));
5902 usec_rem
= do_div(t
, USEC_PER_SEC
);
5903 trace_seq_printf(s
, "now ts: %5llu.%06lu\n", t
, usec_rem
);
5905 /* counter or tsc mode for trace_clock */
5906 trace_seq_printf(s
, "oldest event ts: %llu\n",
5907 ring_buffer_oldest_event_ts(trace_buf
->buffer
, cpu
));
5909 trace_seq_printf(s
, "now ts: %llu\n",
5910 ring_buffer_time_stamp(trace_buf
->buffer
, cpu
));
5913 cnt
= ring_buffer_dropped_events_cpu(trace_buf
->buffer
, cpu
);
5914 trace_seq_printf(s
, "dropped events: %ld\n", cnt
);
5916 cnt
= ring_buffer_read_events_cpu(trace_buf
->buffer
, cpu
);
5917 trace_seq_printf(s
, "read events: %ld\n", cnt
);
5919 count
= simple_read_from_buffer(ubuf
, count
, ppos
,
5920 s
->buffer
, trace_seq_used(s
));
5927 static const struct file_operations tracing_stats_fops
= {
5928 .open
= tracing_open_generic_tr
,
5929 .read
= tracing_stats_read
,
5930 .llseek
= generic_file_llseek
,
5931 .release
= tracing_release_generic_tr
,
5934 #ifdef CONFIG_DYNAMIC_FTRACE
5936 int __weak
ftrace_arch_read_dyn_info(char *buf
, int size
)
5942 tracing_read_dyn_info(struct file
*filp
, char __user
*ubuf
,
5943 size_t cnt
, loff_t
*ppos
)
5945 static char ftrace_dyn_info_buffer
[1024];
5946 static DEFINE_MUTEX(dyn_info_mutex
);
5947 unsigned long *p
= filp
->private_data
;
5948 char *buf
= ftrace_dyn_info_buffer
;
5949 int size
= ARRAY_SIZE(ftrace_dyn_info_buffer
);
5952 mutex_lock(&dyn_info_mutex
);
5953 r
= sprintf(buf
, "%ld ", *p
);
5955 r
+= ftrace_arch_read_dyn_info(buf
+r
, (size
-1)-r
);
5958 r
= simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
5960 mutex_unlock(&dyn_info_mutex
);
5965 static const struct file_operations tracing_dyn_info_fops
= {
5966 .open
= tracing_open_generic
,
5967 .read
= tracing_read_dyn_info
,
5968 .llseek
= generic_file_llseek
,
5970 #endif /* CONFIG_DYNAMIC_FTRACE */
5972 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5974 ftrace_snapshot(unsigned long ip
, unsigned long parent_ip
, void **data
)
5980 ftrace_count_snapshot(unsigned long ip
, unsigned long parent_ip
, void **data
)
5982 unsigned long *count
= (long *)data
;
5994 ftrace_snapshot_print(struct seq_file
*m
, unsigned long ip
,
5995 struct ftrace_probe_ops
*ops
, void *data
)
5997 long count
= (long)data
;
5999 seq_printf(m
, "%ps:", (void *)ip
);
6001 seq_puts(m
, "snapshot");
6004 seq_puts(m
, ":unlimited\n");
6006 seq_printf(m
, ":count=%ld\n", count
);
6011 static struct ftrace_probe_ops snapshot_probe_ops
= {
6012 .func
= ftrace_snapshot
,
6013 .print
= ftrace_snapshot_print
,
6016 static struct ftrace_probe_ops snapshot_count_probe_ops
= {
6017 .func
= ftrace_count_snapshot
,
6018 .print
= ftrace_snapshot_print
,
6022 ftrace_trace_snapshot_callback(struct ftrace_hash
*hash
,
6023 char *glob
, char *cmd
, char *param
, int enable
)
6025 struct ftrace_probe_ops
*ops
;
6026 void *count
= (void *)-1;
6030 /* hash funcs only work with set_ftrace_filter */
6034 ops
= param
? &snapshot_count_probe_ops
: &snapshot_probe_ops
;
6036 if (glob
[0] == '!') {
6037 unregister_ftrace_function_probe_func(glob
+1, ops
);
6044 number
= strsep(¶m
, ":");
6046 if (!strlen(number
))
6050 * We use the callback data field (which is a pointer)
6053 ret
= kstrtoul(number
, 0, (unsigned long *)&count
);
6058 ret
= register_ftrace_function_probe(glob
, ops
, count
);
6061 alloc_snapshot(&global_trace
);
6063 return ret
< 0 ? ret
: 0;
6066 static struct ftrace_func_command ftrace_snapshot_cmd
= {
6068 .func
= ftrace_trace_snapshot_callback
,
6071 static __init
int register_snapshot_cmd(void)
6073 return register_ftrace_command(&ftrace_snapshot_cmd
);
6076 static inline __init
int register_snapshot_cmd(void) { return 0; }
6077 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
6079 static struct dentry
*tracing_get_dentry(struct trace_array
*tr
)
6081 if (WARN_ON(!tr
->dir
))
6082 return ERR_PTR(-ENODEV
);
6084 /* Top directory uses NULL as the parent */
6085 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
6088 /* All sub buffers have a descriptor */
6092 static struct dentry
*tracing_dentry_percpu(struct trace_array
*tr
, int cpu
)
6094 struct dentry
*d_tracer
;
6097 return tr
->percpu_dir
;
6099 d_tracer
= tracing_get_dentry(tr
);
6100 if (IS_ERR(d_tracer
))
6103 tr
->percpu_dir
= tracefs_create_dir("per_cpu", d_tracer
);
6105 WARN_ONCE(!tr
->percpu_dir
,
6106 "Could not create tracefs directory 'per_cpu/%d'\n", cpu
);
6108 return tr
->percpu_dir
;
6111 static struct dentry
*
6112 trace_create_cpu_file(const char *name
, umode_t mode
, struct dentry
*parent
,
6113 void *data
, long cpu
, const struct file_operations
*fops
)
6115 struct dentry
*ret
= trace_create_file(name
, mode
, parent
, data
, fops
);
6117 if (ret
) /* See tracing_get_cpu() */
6118 d_inode(ret
)->i_cdev
= (void *)(cpu
+ 1);
6123 tracing_init_tracefs_percpu(struct trace_array
*tr
, long cpu
)
6125 struct dentry
*d_percpu
= tracing_dentry_percpu(tr
, cpu
);
6126 struct dentry
*d_cpu
;
6127 char cpu_dir
[30]; /* 30 characters should be more than enough */
6132 snprintf(cpu_dir
, 30, "cpu%ld", cpu
);
6133 d_cpu
= tracefs_create_dir(cpu_dir
, d_percpu
);
6135 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir
);
6139 /* per cpu trace_pipe */
6140 trace_create_cpu_file("trace_pipe", 0444, d_cpu
,
6141 tr
, cpu
, &tracing_pipe_fops
);
6144 trace_create_cpu_file("trace", 0644, d_cpu
,
6145 tr
, cpu
, &tracing_fops
);
6147 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu
,
6148 tr
, cpu
, &tracing_buffers_fops
);
6150 trace_create_cpu_file("stats", 0444, d_cpu
,
6151 tr
, cpu
, &tracing_stats_fops
);
6153 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu
,
6154 tr
, cpu
, &tracing_entries_fops
);
6156 #ifdef CONFIG_TRACER_SNAPSHOT
6157 trace_create_cpu_file("snapshot", 0644, d_cpu
,
6158 tr
, cpu
, &snapshot_fops
);
6160 trace_create_cpu_file("snapshot_raw", 0444, d_cpu
,
6161 tr
, cpu
, &snapshot_raw_fops
);
6165 #ifdef CONFIG_FTRACE_SELFTEST
6166 /* Let selftest have access to static functions in this file */
6167 #include "trace_selftest.c"
6171 trace_options_read(struct file
*filp
, char __user
*ubuf
, size_t cnt
,
6174 struct trace_option_dentry
*topt
= filp
->private_data
;
6177 if (topt
->flags
->val
& topt
->opt
->bit
)
6182 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, 2);
6186 trace_options_write(struct file
*filp
, const char __user
*ubuf
, size_t cnt
,
6189 struct trace_option_dentry
*topt
= filp
->private_data
;
6193 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
6197 if (val
!= 0 && val
!= 1)
6200 if (!!(topt
->flags
->val
& topt
->opt
->bit
) != val
) {
6201 mutex_lock(&trace_types_lock
);
6202 ret
= __set_tracer_option(topt
->tr
, topt
->flags
,
6204 mutex_unlock(&trace_types_lock
);
6215 static const struct file_operations trace_options_fops
= {
6216 .open
= tracing_open_generic
,
6217 .read
= trace_options_read
,
6218 .write
= trace_options_write
,
6219 .llseek
= generic_file_llseek
,
6223 * In order to pass in both the trace_array descriptor as well as the index
6224 * to the flag that the trace option file represents, the trace_array
6225 * has a character array of trace_flags_index[], which holds the index
6226 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6227 * The address of this character array is passed to the flag option file
6228 * read/write callbacks.
6230 * In order to extract both the index and the trace_array descriptor,
6231 * get_tr_index() uses the following algorithm.
6235 * As the pointer itself contains the address of the index (remember
6238 * Then to get the trace_array descriptor, by subtracting that index
6239 * from the ptr, we get to the start of the index itself.
6241 * ptr - idx == &index[0]
6243 * Then a simple container_of() from that pointer gets us to the
6244 * trace_array descriptor.
6246 static void get_tr_index(void *data
, struct trace_array
**ptr
,
6247 unsigned int *pindex
)
6249 *pindex
= *(unsigned char *)data
;
6251 *ptr
= container_of(data
- *pindex
, struct trace_array
,
6256 trace_options_core_read(struct file
*filp
, char __user
*ubuf
, size_t cnt
,
6259 void *tr_index
= filp
->private_data
;
6260 struct trace_array
*tr
;
6264 get_tr_index(tr_index
, &tr
, &index
);
6266 if (tr
->trace_flags
& (1 << index
))
6271 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, 2);
6275 trace_options_core_write(struct file
*filp
, const char __user
*ubuf
, size_t cnt
,
6278 void *tr_index
= filp
->private_data
;
6279 struct trace_array
*tr
;
6284 get_tr_index(tr_index
, &tr
, &index
);
6286 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
6290 if (val
!= 0 && val
!= 1)
6293 mutex_lock(&trace_types_lock
);
6294 ret
= set_tracer_flag(tr
, 1 << index
, val
);
6295 mutex_unlock(&trace_types_lock
);
6305 static const struct file_operations trace_options_core_fops
= {
6306 .open
= tracing_open_generic
,
6307 .read
= trace_options_core_read
,
6308 .write
= trace_options_core_write
,
6309 .llseek
= generic_file_llseek
,
6312 struct dentry
*trace_create_file(const char *name
,
6314 struct dentry
*parent
,
6316 const struct file_operations
*fops
)
6320 ret
= tracefs_create_file(name
, mode
, parent
, data
, fops
);
6322 pr_warning("Could not create tracefs '%s' entry\n", name
);
6328 static struct dentry
*trace_options_init_dentry(struct trace_array
*tr
)
6330 struct dentry
*d_tracer
;
6335 d_tracer
= tracing_get_dentry(tr
);
6336 if (IS_ERR(d_tracer
))
6339 tr
->options
= tracefs_create_dir("options", d_tracer
);
6341 pr_warning("Could not create tracefs directory 'options'\n");
6349 create_trace_option_file(struct trace_array
*tr
,
6350 struct trace_option_dentry
*topt
,
6351 struct tracer_flags
*flags
,
6352 struct tracer_opt
*opt
)
6354 struct dentry
*t_options
;
6356 t_options
= trace_options_init_dentry(tr
);
6360 topt
->flags
= flags
;
6364 topt
->entry
= trace_create_file(opt
->name
, 0644, t_options
, topt
,
6365 &trace_options_fops
);
6370 create_trace_option_files(struct trace_array
*tr
, struct tracer
*tracer
)
6372 struct trace_option_dentry
*topts
;
6373 struct trace_options
*tr_topts
;
6374 struct tracer_flags
*flags
;
6375 struct tracer_opt
*opts
;
6382 flags
= tracer
->flags
;
6384 if (!flags
|| !flags
->opts
)
6388 * If this is an instance, only create flags for tracers
6389 * the instance may have.
6391 if (!trace_ok_for_array(tracer
, tr
))
6394 for (i
= 0; i
< tr
->nr_topts
; i
++) {
6396 * Check if these flags have already been added.
6397 * Some tracers share flags.
6399 if (tr
->topts
[i
].tracer
->flags
== tracer
->flags
)
6405 for (cnt
= 0; opts
[cnt
].name
; cnt
++)
6408 topts
= kcalloc(cnt
+ 1, sizeof(*topts
), GFP_KERNEL
);
6412 tr_topts
= krealloc(tr
->topts
, sizeof(*tr
->topts
) * (tr
->nr_topts
+ 1),
6419 tr
->topts
= tr_topts
;
6420 tr
->topts
[tr
->nr_topts
].tracer
= tracer
;
6421 tr
->topts
[tr
->nr_topts
].topts
= topts
;
6424 for (cnt
= 0; opts
[cnt
].name
; cnt
++) {
6425 create_trace_option_file(tr
, &topts
[cnt
], flags
,
6427 WARN_ONCE(topts
[cnt
].entry
== NULL
,
6428 "Failed to create trace option: %s",
6433 static struct dentry
*
6434 create_trace_option_core_file(struct trace_array
*tr
,
6435 const char *option
, long index
)
6437 struct dentry
*t_options
;
6439 t_options
= trace_options_init_dentry(tr
);
6443 return trace_create_file(option
, 0644, t_options
,
6444 (void *)&tr
->trace_flags_index
[index
],
6445 &trace_options_core_fops
);
6448 static void create_trace_options_dir(struct trace_array
*tr
)
6450 struct dentry
*t_options
;
6451 bool top_level
= tr
== &global_trace
;
6454 t_options
= trace_options_init_dentry(tr
);
6458 for (i
= 0; trace_options
[i
]; i
++) {
6460 !((1 << i
) & TOP_LEVEL_TRACE_FLAGS
))
6461 create_trace_option_core_file(tr
, trace_options
[i
], i
);
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
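/*
 * Illustrative usage (not part of the original source): rb_simple_fops
 * backs the tracing_on file, so writing to it switches the ring buffer
 * (and runs the current tracer's start/stop callbacks) without
 * changing which tracer is loaded:
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on
 *	echo 1 > /sys/kernel/tracing/tracing_on
 */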
6519 struct dentry
*trace_instance_dir
;
6522 init_tracer_tracefs(struct trace_array
*tr
, struct dentry
*d_tracer
);
6525 allocate_trace_buffer(struct trace_array
*tr
, struct trace_buffer
*buf
, int size
)
6527 enum ring_buffer_flags rb_flags
;
6529 rb_flags
= tr
->trace_flags
& TRACE_ITER_OVERWRITE
? RB_FL_OVERWRITE
: 0;
6533 buf
->buffer
= ring_buffer_alloc(size
, rb_flags
);
6537 buf
->data
= alloc_percpu(struct trace_array_cpu
);
6539 ring_buffer_free(buf
->buffer
);
6543 /* Allocate the first page for all buffers */
6544 set_buffer_entries(&tr
->trace_buffer
,
6545 ring_buffer_size(tr
->trace_buffer
.buffer
, 0));
6550 static int allocate_trace_buffers(struct trace_array
*tr
, int size
)
6554 ret
= allocate_trace_buffer(tr
, &tr
->trace_buffer
, size
);
6558 #ifdef CONFIG_TRACER_MAX_TRACE
6559 ret
= allocate_trace_buffer(tr
, &tr
->max_buffer
,
6560 allocate_snapshot
? size
: 1);
6562 ring_buffer_free(tr
->trace_buffer
.buffer
);
6563 free_percpu(tr
->trace_buffer
.data
);
6566 tr
->allocated_snapshot
= allocate_snapshot
;
6569 * Only the top level trace array gets its snapshot allocated
6570 * from the kernel command line.
6572 allocate_snapshot
= false;
6577 static void free_trace_buffer(struct trace_buffer
*buf
)
6580 ring_buffer_free(buf
->buffer
);
6582 free_percpu(buf
->data
);
6587 static void free_trace_buffers(struct trace_array
*tr
)
6592 free_trace_buffer(&tr
->trace_buffer
);
6594 #ifdef CONFIG_TRACER_MAX_TRACE
6595 free_trace_buffer(&tr
->max_buffer
);
6599 static void init_trace_flags_index(struct trace_array
*tr
)
6603 /* Used by the trace options files */
6604 for (i
= 0; i
< TRACE_FLAGS_MAX_SIZE
; i
++)
6605 tr
->trace_flags_index
[i
] = i
;
6608 static void __update_tracer_options(struct trace_array
*tr
)
6612 for (t
= trace_types
; t
; t
= t
->next
)
6613 add_tracer_options(tr
, t
);
6616 static void update_tracer_options(struct trace_array
*tr
)
6618 mutex_lock(&trace_types_lock
);
6619 __update_tracer_options(tr
);
6620 mutex_unlock(&trace_types_lock
);
6623 static int instance_mkdir(const char *name
)
6625 struct trace_array
*tr
;
6628 mutex_lock(&trace_types_lock
);
6631 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
6632 if (tr
->name
&& strcmp(tr
->name
, name
) == 0)
6637 tr
= kzalloc(sizeof(*tr
), GFP_KERNEL
);
6641 tr
->name
= kstrdup(name
, GFP_KERNEL
);
6645 if (!alloc_cpumask_var(&tr
->tracing_cpumask
, GFP_KERNEL
))
6648 tr
->trace_flags
= global_trace
.trace_flags
;
6650 cpumask_copy(tr
->tracing_cpumask
, cpu_all_mask
);
6652 raw_spin_lock_init(&tr
->start_lock
);
6654 tr
->max_lock
= (arch_spinlock_t
)__ARCH_SPIN_LOCK_UNLOCKED
;
6656 tr
->current_trace
= &nop_trace
;
6658 INIT_LIST_HEAD(&tr
->systems
);
6659 INIT_LIST_HEAD(&tr
->events
);
6661 if (allocate_trace_buffers(tr
, trace_buf_size
) < 0)
6664 tr
->dir
= tracefs_create_dir(name
, trace_instance_dir
);
6668 ret
= event_trace_add_tracer(tr
->dir
, tr
);
6670 tracefs_remove_recursive(tr
->dir
);
6674 init_tracer_tracefs(tr
, tr
->dir
);
6675 init_trace_flags_index(tr
);
6676 __update_tracer_options(tr
);
6678 list_add(&tr
->list
, &ftrace_trace_arrays
);
6680 mutex_unlock(&trace_types_lock
);
6685 free_trace_buffers(tr
);
6686 free_cpumask_var(tr
->tracing_cpumask
);
6691 mutex_unlock(&trace_types_lock
);
6697 static int instance_rmdir(const char *name
)
6699 struct trace_array
*tr
;
6704 mutex_lock(&trace_types_lock
);
6707 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
6708 if (tr
->name
&& strcmp(tr
->name
, name
) == 0) {
6717 if (tr
->ref
|| (tr
->current_trace
&& tr
->current_trace
->ref
))
6720 list_del(&tr
->list
);
6722 tracing_set_nop(tr
);
6723 event_trace_del_tracer(tr
);
6724 ftrace_destroy_function_files(tr
);
6725 tracefs_remove_recursive(tr
->dir
);
6726 free_trace_buffers(tr
);
6728 for (i
= 0; i
< tr
->nr_topts
; i
++) {
6729 kfree(tr
->topts
[i
].topts
);
6739 mutex_unlock(&trace_types_lock
);
6744 static __init
void create_trace_instances(struct dentry
*d_tracer
)
6746 trace_instance_dir
= tracefs_create_instance_dir("instances", d_tracer
,
6749 if (WARN_ON(!trace_instance_dir
))
6754 init_tracer_tracefs(struct trace_array
*tr
, struct dentry
*d_tracer
)
6758 trace_create_file("available_tracers", 0444, d_tracer
,
6759 tr
, &show_traces_fops
);
6761 trace_create_file("current_tracer", 0644, d_tracer
,
6762 tr
, &set_tracer_fops
);
6764 trace_create_file("tracing_cpumask", 0644, d_tracer
,
6765 tr
, &tracing_cpumask_fops
);
6767 trace_create_file("trace_options", 0644, d_tracer
,
6768 tr
, &tracing_iter_fops
);
6770 trace_create_file("trace", 0644, d_tracer
,
6773 trace_create_file("trace_pipe", 0444, d_tracer
,
6774 tr
, &tracing_pipe_fops
);
6776 trace_create_file("buffer_size_kb", 0644, d_tracer
,
6777 tr
, &tracing_entries_fops
);
6779 trace_create_file("buffer_total_size_kb", 0444, d_tracer
,
6780 tr
, &tracing_total_entries_fops
);
6782 trace_create_file("free_buffer", 0200, d_tracer
,
6783 tr
, &tracing_free_buffer_fops
);
6785 trace_create_file("trace_marker", 0220, d_tracer
,
6786 tr
, &tracing_mark_fops
);
6788 trace_create_file("trace_clock", 0644, d_tracer
, tr
,
6791 trace_create_file("tracing_on", 0644, d_tracer
,
6792 tr
, &rb_simple_fops
);
6794 create_trace_options_dir(tr
);
6796 #ifdef CONFIG_TRACER_MAX_TRACE
6797 trace_create_file("tracing_max_latency", 0644, d_tracer
,
6798 &tr
->max_latency
, &tracing_max_lat_fops
);
6801 if (ftrace_create_function_files(tr
, d_tracer
))
6802 WARN(1, "Could not allocate function filter files");
6804 #ifdef CONFIG_TRACER_SNAPSHOT
6805 trace_create_file("snapshot", 0644, d_tracer
,
6806 tr
, &snapshot_fops
);
6809 for_each_tracing_cpu(cpu
)
6810 tracing_init_tracefs_percpu(tr
, cpu
);
6814 static struct vfsmount
*trace_automount(void *ingore
)
6816 struct vfsmount
*mnt
;
6817 struct file_system_type
*type
;
6820 * To maintain backward compatibility for tools that mount
6821 * debugfs to get to the tracing facility, tracefs is automatically
6822 * mounted to the debugfs/tracing directory.
6824 type
= get_fs_type("tracefs");
6827 mnt
= vfs_kern_mount(type
, 0, "tracefs", NULL
);
6828 put_filesystem(type
);
6837 * tracing_init_dentry - initialize top level trace array
6839 * This is called when creating files or directories in the tracing
6840 * directory. It is called via fs_initcall() by any of the boot up code
6841 * and expects to return the dentry of the top level tracing directory.
6843 struct dentry
*tracing_init_dentry(void)
6845 struct trace_array
*tr
= &global_trace
;
6847 /* The top level trace array uses NULL as parent */
6851 if (WARN_ON(!debugfs_initialized()))
6852 return ERR_PTR(-ENODEV
);
6855 * As there may still be users that expect the tracing
6856 * files to exist in debugfs/tracing, we must automount
6857 * the tracefs file system there, so older tools still
6858 * work with the newer kerenl.
6860 tr
->dir
= debugfs_create_automount("tracing", NULL
,
6861 trace_automount
, NULL
);
6863 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6864 return ERR_PTR(-ENOMEM
);
6870 extern struct trace_enum_map
*__start_ftrace_enum_maps
[];
6871 extern struct trace_enum_map
*__stop_ftrace_enum_maps
[];
6873 static void __init
trace_enum_init(void)
6877 len
= __stop_ftrace_enum_maps
- __start_ftrace_enum_maps
;
6878 trace_insert_enum_map(NULL
, __start_ftrace_enum_maps
, len
);
6881 #ifdef CONFIG_MODULES
6882 static void trace_module_add_enums(struct module
*mod
)
6884 if (!mod
->num_trace_enums
)
6888 * Modules with bad taint do not have events created, do
6889 * not bother with enums either.
6891 if (trace_module_has_bad_taint(mod
))
6894 trace_insert_enum_map(mod
, mod
->trace_enums
, mod
->num_trace_enums
);
6897 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
6898 static void trace_module_remove_enums(struct module
*mod
)
6900 union trace_enum_map_item
*map
;
6901 union trace_enum_map_item
**last
= &trace_enum_maps
;
6903 if (!mod
->num_trace_enums
)
6906 mutex_lock(&trace_enum_mutex
);
6908 map
= trace_enum_maps
;
6911 if (map
->head
.mod
== mod
)
6913 map
= trace_enum_jmp_to_tail(map
);
6914 last
= &map
->tail
.next
;
6915 map
= map
->tail
.next
;
6920 *last
= trace_enum_jmp_to_tail(map
)->tail
.next
;
6923 mutex_unlock(&trace_enum_mutex
);
6926 static inline void trace_module_remove_enums(struct module
*mod
) { }
6927 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6929 static int trace_module_notify(struct notifier_block
*self
,
6930 unsigned long val
, void *data
)
6932 struct module
*mod
= data
;
6935 case MODULE_STATE_COMING
:
6936 trace_module_add_enums(mod
);
6938 case MODULE_STATE_GOING
:
6939 trace_module_remove_enums(mod
);
6946 static struct notifier_block trace_module_nb
= {
6947 .notifier_call
= trace_module_notify
,
6950 #endif /* CONFIG_MODULES */
6952 static __init
int tracer_init_tracefs(void)
6954 struct dentry
*d_tracer
;
6956 trace_access_lock_init();
6958 d_tracer
= tracing_init_dentry();
6959 if (IS_ERR(d_tracer
))
6962 init_tracer_tracefs(&global_trace
, d_tracer
);
6964 trace_create_file("tracing_thresh", 0644, d_tracer
,
6965 &global_trace
, &tracing_thresh_fops
);
6967 trace_create_file("README", 0444, d_tracer
,
6968 NULL
, &tracing_readme_fops
);
6970 trace_create_file("saved_cmdlines", 0444, d_tracer
,
6971 NULL
, &tracing_saved_cmdlines_fops
);
6973 trace_create_file("saved_cmdlines_size", 0644, d_tracer
,
6974 NULL
, &tracing_saved_cmdlines_size_fops
);
6978 trace_create_enum_file(d_tracer
);
6980 #ifdef CONFIG_MODULES
6981 register_module_notifier(&trace_module_nb
);
6984 #ifdef CONFIG_DYNAMIC_FTRACE
6985 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer
,
6986 &ftrace_update_tot_cnt
, &tracing_dyn_info_fops
);
6989 create_trace_instances(d_tracer
);
6991 update_tracer_options(&global_trace
);
6996 static int trace_panic_handler(struct notifier_block
*this,
6997 unsigned long event
, void *unused
)
6999 if (ftrace_dump_on_oops
)
7000 ftrace_dump(ftrace_dump_on_oops
);
7004 static struct notifier_block trace_panic_notifier
= {
7005 .notifier_call
= trace_panic_handler
,
7007 .priority
= 150 /* priority: INT_MAX >= x >= 0 */
7010 static int trace_die_handler(struct notifier_block
*self
,
7016 if (ftrace_dump_on_oops
)
7017 ftrace_dump(ftrace_dump_on_oops
);
7025 static struct notifier_block trace_die_notifier
= {
7026 .notifier_call
= trace_die_handler
,
7031 * printk is set to max of 1024, we really don't need it that big.
7032 * Nothing should be printing 1000 characters anyway.
7034 #define TRACE_MAX_PRINT 1000
7037 * Define here KERN_TRACE so that we have one place to modify
7038 * it if we decide to change what log level the ftrace dump
7041 #define KERN_TRACE KERN_EMERG
7044 trace_printk_seq(struct trace_seq
*s
)
7046 /* Probably should print a warning here. */
7047 if (s
->seq
.len
>= TRACE_MAX_PRINT
)
7048 s
->seq
.len
= TRACE_MAX_PRINT
;
7051 * More paranoid code. Although the buffer size is set to
7052 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7053 * an extra layer of protection.
7055 if (WARN_ON_ONCE(s
->seq
.len
>= s
->seq
.size
))
7056 s
->seq
.len
= s
->seq
.size
- 1;
7058 /* should be zero ended, but we are paranoid. */
7059 s
->buffer
[s
->seq
.len
] = 0;
7061 printk(KERN_TRACE
"%s", s
->buffer
);
7066 void trace_init_global_iter(struct trace_iterator
*iter
)
7068 iter
->tr
= &global_trace
;
7069 iter
->trace
= iter
->tr
->current_trace
;
7070 iter
->cpu_file
= RING_BUFFER_ALL_CPUS
;
7071 iter
->trace_buffer
= &global_trace
.trace_buffer
;
7073 if (iter
->trace
&& iter
->trace
->open
)
7074 iter
->trace
->open(iter
);
7076 /* Annotate start of buffers if we had overruns */
7077 if (ring_buffer_overruns(iter
->trace_buffer
->buffer
))
7078 iter
->iter_flags
|= TRACE_FILE_ANNOTATE
;
7080 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7081 if (trace_clocks
[iter
->tr
->clock_id
].in_ns
)
7082 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
7085 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode
)
7087 /* use static because iter can be a bit big for the stack */
7088 static struct trace_iterator iter
;
7089 static atomic_t dump_running
;
7090 struct trace_array
*tr
= &global_trace
;
7091 unsigned int old_userobj
;
7092 unsigned long flags
;
7095 /* Only allow one dump user at a time. */
7096 if (atomic_inc_return(&dump_running
) != 1) {
7097 atomic_dec(&dump_running
);
7102 * Always turn off tracing when we dump.
7103 * We don't need to show trace output of what happens
7104 * between multiple crashes.
7106 * If the user does a sysrq-z, then they can re-enable
7107 * tracing with echo 1 > tracing_on.
7111 local_irq_save(flags
);
7113 /* Simulate the iterator */
7114 trace_init_global_iter(&iter
);
7116 for_each_tracing_cpu(cpu
) {
7117 atomic_inc(&per_cpu_ptr(iter
.trace_buffer
->data
, cpu
)->disabled
);
7120 old_userobj
= tr
->trace_flags
& TRACE_ITER_SYM_USEROBJ
;
7122 /* don't look at user memory in panic mode */
7123 tr
->trace_flags
&= ~TRACE_ITER_SYM_USEROBJ
;
7125 switch (oops_dump_mode
) {
7127 iter
.cpu_file
= RING_BUFFER_ALL_CPUS
;
7130 iter
.cpu_file
= raw_smp_processor_id();
7135 printk(KERN_TRACE
"Bad dumping mode, switching to all CPUs dump\n");
7136 iter
.cpu_file
= RING_BUFFER_ALL_CPUS
;
7139 printk(KERN_TRACE
"Dumping ftrace buffer:\n");
7141 /* Did function tracer already get disabled? */
7142 if (ftrace_is_dead()) {
7143 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7144 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7148 * We need to stop all tracing on all CPUS to read the
7149 * the next buffer. This is a bit expensive, but is
7150 * not done often. We fill all what we can read,
7151 * and then release the locks again.
7154 while (!trace_empty(&iter
)) {
7157 printk(KERN_TRACE
"---------------------------------\n");
7161 /* reset all but tr, trace, and overruns */
7162 memset(&iter
.seq
, 0,
7163 sizeof(struct trace_iterator
) -
7164 offsetof(struct trace_iterator
, seq
));
7165 iter
.iter_flags
|= TRACE_FILE_LAT_FMT
;
7168 if (trace_find_next_entry_inc(&iter
) != NULL
) {
7171 ret
= print_trace_line(&iter
);
7172 if (ret
!= TRACE_TYPE_NO_CONSUME
)
7173 trace_consume(&iter
);
7175 touch_nmi_watchdog();
7177 trace_printk_seq(&iter
.seq
);
7181 printk(KERN_TRACE
" (ftrace buffer empty)\n");
7183 printk(KERN_TRACE
"---------------------------------\n");
7186 tr
->trace_flags
|= old_userobj
;
7188 for_each_tracing_cpu(cpu
) {
7189 atomic_dec(&per_cpu_ptr(iter
.trace_buffer
->data
, cpu
)->disabled
);
7191 atomic_dec(&dump_running
);
7192 local_irq_restore(flags
);
7194 EXPORT_SYMBOL_GPL(ftrace_dump
);
7196 __init
static int tracer_alloc_buffers(void)
7202 * Make sure we don't accidently add more trace options
7203 * than we have bits for.
7205 BUILD_BUG_ON(TRACE_ITER_LAST_BIT
> TRACE_FLAGS_MAX_SIZE
);
7207 if (!alloc_cpumask_var(&tracing_buffer_mask
, GFP_KERNEL
))
7210 if (!alloc_cpumask_var(&global_trace
.tracing_cpumask
, GFP_KERNEL
))
7211 goto out_free_buffer_mask
;
7213 /* Only allocate trace_printk buffers if a trace_printk exists */
7214 if (__stop___trace_bprintk_fmt
!= __start___trace_bprintk_fmt
)
7215 /* Must be called before global_trace.buffer is allocated */
7216 trace_printk_init_buffers();
7218 /* To save memory, keep the ring buffer size to its minimum */
7219 if (ring_buffer_expanded
)
7220 ring_buf_size
= trace_buf_size
;
7224 cpumask_copy(tracing_buffer_mask
, cpu_possible_mask
);
7225 cpumask_copy(global_trace
.tracing_cpumask
, cpu_all_mask
);
7227 raw_spin_lock_init(&global_trace
.start_lock
);
7229 /* Used for event triggers */
7230 temp_buffer
= ring_buffer_alloc(PAGE_SIZE
, RB_FL_OVERWRITE
);
7232 goto out_free_cpumask
;
7234 if (trace_create_savedcmd() < 0)
7235 goto out_free_temp_buffer
;
7237 /* TODO: make the number of buffers hot pluggable with CPUS */
7238 if (allocate_trace_buffers(&global_trace
, ring_buf_size
) < 0) {
7239 printk(KERN_ERR
"tracer: failed to allocate ring buffer!\n");
7241 goto out_free_savedcmd
;
7244 if (global_trace
.buffer_disabled
)
7247 if (trace_boot_clock
) {
7248 ret
= tracing_set_clock(&global_trace
, trace_boot_clock
);
7250 pr_warning("Trace clock %s not defined, going back to default\n",
7255 * register_tracer() might reference current_trace, so it
7256 * needs to be set before we register anything. This is
7257 * just a bootstrap of current_trace anyway.
7259 global_trace
.current_trace
= &nop_trace
;
7261 global_trace
.max_lock
= (arch_spinlock_t
)__ARCH_SPIN_LOCK_UNLOCKED
;
7263 ftrace_init_global_array_ops(&global_trace
);
7265 init_trace_flags_index(&global_trace
);
7267 register_tracer(&nop_trace
);
7269 /* All seems OK, enable tracing */
7270 tracing_disabled
= 0;
7272 atomic_notifier_chain_register(&panic_notifier_list
,
7273 &trace_panic_notifier
);
7275 register_die_notifier(&trace_die_notifier
);
7277 global_trace
.flags
= TRACE_ARRAY_FL_GLOBAL
;
7279 INIT_LIST_HEAD(&global_trace
.systems
);
7280 INIT_LIST_HEAD(&global_trace
.events
);
7281 list_add(&global_trace
.list
, &ftrace_trace_arrays
);
7283 apply_trace_boot_options();
7285 register_snapshot_cmd();
7290 free_saved_cmdlines_buffer(savedcmd
);
7291 out_free_temp_buffer
:
7292 ring_buffer_free(temp_buffer
);
7294 free_cpumask_var(global_trace
.tracing_cpumask
);
7295 out_free_buffer_mask
:
7296 free_cpumask_var(tracing_buffer_mask
);
7301 void __init
trace_init(void)
7303 if (tracepoint_printk
) {
7304 tracepoint_print_iter
=
7305 kmalloc(sizeof(*tracepoint_print_iter
), GFP_KERNEL
);
7306 if (WARN_ON(!tracepoint_print_iter
))
7307 tracepoint_printk
= 0;
7309 tracer_alloc_buffers();
7313 __init
static int clear_boot_tracer(void)
7316 * The default tracer at boot buffer is an init section.
7317 * This function is called in lateinit. If we did not
7318 * find the boot tracer, then clear it out, to prevent
7319 * later registration from accessing the buffer that is
7320 * about to be freed.
7322 if (!default_bootup_tracer
)
7325 printk(KERN_INFO
"ftrace bootup tracer '%s' not registered.\n",
7326 default_bootup_tracer
);
7327 default_bootup_tracer
= NULL
;
7332 fs_initcall(tracer_init_tracefs
);
7333 late_initcall(clear_boot_tracer
);