/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/sched/rt.h>

#include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;
/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;
/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}
/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);
/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);
cpumask_var_t __read_mostly tracing_buffer_mask;
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set to 1 if you want to dump the buffers of all CPUs.
 * Set to 2 if you want to dump only the buffer of the CPU that triggered the oops.
 */
enum ftrace_dump_mode ftrace_dump_on_oops;
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
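
/*
 * Illustrative sketch (assumed example, not from the original source): the
 * "ftrace=" parameter parsed above selects the tracer to start at boot, e.g.
 *
 *	ftrace=function trace_options=sym-offset trace_buf_size=1M
 *
 * on the kernel command line starts the function tracer as soon as tracing
 * is initialized, with the given options and per-cpu buffer size.
 */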
static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);
static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);
static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}
static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);
int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);
static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}
cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}
/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() ..etc)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
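
/*
 * Illustrative sketch (assumed example, not part of the original file): a
 * consuming reader is expected to bracket its ring-buffer accesses with the
 * primitives above, roughly like:
 *
 *	trace_access_lock(cpu);
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process(event);		// "process" is a placeholder
 *	trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS takes the write side of all_cpu_access_lock
 * and therefore excludes every per-cpu reader at once.
 */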
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
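
/*
 * Illustrative sketch (assumed example, not part of the original file):
 * callers normally reach __trace_puts() through the trace_puts() macro,
 * which supplies _THIS_IP_ and the literal's length for them, e.g.
 *
 *	trace_puts("reached the slow path\n");
 *
 * is roughly equivalent to
 *
 *	__trace_puts(_THIS_IP_, "reached the slow path\n",
 *		     strlen("reached the slow path\n"));
 */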
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * trace_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
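
/*
 * Illustrative sketch (assumed example, not part of the original file): a
 * typical use is to allocate the snapshot buffer once in a context that may
 * sleep, then snapshot the live trace whenever an interesting condition hits:
 *
 *	if (tracing_alloc_snapshot() == 0) {
 *		...
 *		if (saw_bad_latency)	// placeholder condition
 *			tracing_snapshot();
 *	}
 */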
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
/**
 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to trace_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
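
/*
 * Illustrative sketch (assumed example, not part of the original file):
 * tracing_on()/tracing_off() are exported so that a driver can freeze the
 * ring buffer around a suspect region and inspect it afterwards:
 *
 *	tracing_on();
 *	do_suspect_work();	// placeholder for the code being debugged
 *	tracing_off();		// the buffer now holds only that window
 */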
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	/* ... */
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
};
/**
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/**
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}
/**
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
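
/*
 * Illustrative sketch (assumed example, not part of the original file): a
 * typical debugfs write handler drives this parser one token at a time:
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		apply_token(parser.buffer);	// placeholder for the consumer
 *	trace_parser_put(&parser);
 */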
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}
#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
					GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}
static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}
int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}
static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, get_saved_cmdlines(map));
	else
		strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
}
void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
}
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count	= pc & 0xff;
	entry->pid		= (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}
void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}
static void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct ftrace_event_file *ftrace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					  type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip		= ip;
	entry->parent_ip	= parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}
void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}
/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry	= ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}
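
/*
 * Illustrative sketch (assumed example, not part of the original file): the
 * four buffers above mirror how contexts can nest, so a trace_printk() that
 * interrupts another trace_printk() never shares a scratch buffer with it:
 *
 *	task context	-> trace_percpu_buffer
 *	  softirq	-> trace_percpu_sirq_buffer
 *	    hard irq	-> trace_percpu_irq_buffer
 *	      NMI	-> trace_percpu_nmi_buffer
 */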
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}
static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warning("\n**********************************************************\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warning("** unsafe for production use.                           **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** If you see this message and you are not debugging    **\n");
	pr_warning("** the kernel, report this immediately to your vendor!  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}
void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}
/**
 * trace_vbprintk - write binary msg to tracing buffer
 *
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip	= ip;
	entry->fmt	= fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
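
/*
 * Illustrative sketch (assumed example, not part of the original file):
 * trace_vbprintk() is the backend reached by the trace_printk() macro when
 * the format string can be recorded in binary form, so a call such as
 *
 *	trace_printk("processed %d packets on queue %d\n", nr, qid);
 *
 * stores only the format pointer and the two argument words in the ring
 * buffer; the text is reconstructed when the trace file is read.
 */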
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
	if (len > TRACE_BUF_SIZE)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len);
	entry->buf[len] = '\0';
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}
static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpu and peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}
static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}
static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /     delay            \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||  \\    |   /          \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_printf(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n");
	seq_puts(m, "#                             / _----=> need-resched\n");
	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
	seq_puts(m, "#                            || / _--=> preempt-depth\n");
	seq_puts(m, "#                            ||| /     delay\n");
	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |   ||||       |         |\n");
}
2553 print_trace_header(struct seq_file
*m
, struct trace_iterator
*iter
)
2555 unsigned long sym_flags
= (trace_flags
& TRACE_ITER_SYM_MASK
);
2556 struct trace_buffer
*buf
= iter
->trace_buffer
;
2557 struct trace_array_cpu
*data
= per_cpu_ptr(buf
->data
, buf
->cpu
);
2558 struct tracer
*type
= iter
->trace
;
2559 unsigned long entries
;
2560 unsigned long total
;
2561 const char *name
= "preemption";
2565 get_total_entries(buf
, &total
, &entries
);
2567 seq_printf(m
, "# %s latency trace v1.1.5 on %s\n",
2569 seq_puts(m
, "# -----------------------------------"
2570 "---------------------------------\n");
2571 seq_printf(m
, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2572 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2573 nsecs_to_usecs(data
->saved_latency
),
2577 #if defined(CONFIG_PREEMPT_NONE)
2579 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2581 #elif defined(CONFIG_PREEMPT)
2586 /* These are reserved for later use */
2589 seq_printf(m
, " #P:%d)\n", num_online_cpus());
2593 seq_puts(m
, "# -----------------\n");
2594 seq_printf(m
, "# | task: %.16s-%d "
2595 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2596 data
->comm
, data
->pid
,
2597 from_kuid_munged(seq_user_ns(m
), data
->uid
), data
->nice
,
2598 data
->policy
, data
->rt_priority
);
2599 seq_puts(m
, "# -----------------\n");
2601 if (data
->critical_start
) {
2602 seq_puts(m
, "# => started at: ");
2603 seq_print_ip_sym(&iter
->seq
, data
->critical_start
, sym_flags
);
2604 trace_print_seq(m
, &iter
->seq
);
2605 seq_puts(m
, "\n# => ended at: ");
2606 seq_print_ip_sym(&iter
->seq
, data
->critical_end
, sym_flags
);
2607 trace_print_seq(m
, &iter
->seq
);
2608 seq_puts(m
, "\n#\n");
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}
2638 static enum print_line_t
print_trace_fmt(struct trace_iterator
*iter
)
2640 struct trace_seq
*s
= &iter
->seq
;
2641 unsigned long sym_flags
= (trace_flags
& TRACE_ITER_SYM_MASK
);
2642 struct trace_entry
*entry
;
2643 struct trace_event
*event
;
2647 test_cpu_buff_start(iter
);
2649 event
= ftrace_find_event(entry
->type
);
2651 if (trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
2652 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
) {
2653 if (!trace_print_lat_context(iter
))
2656 if (!trace_print_context(iter
))
2662 return event
->funcs
->trace(iter
, sym_flags
, event
);
2664 if (!trace_seq_printf(s
, "Unknown type %d\n", entry
->type
))
2667 return TRACE_TYPE_HANDLED
;
2669 return TRACE_TYPE_PARTIAL_LINE
;
2672 static enum print_line_t
print_raw_fmt(struct trace_iterator
*iter
)
2674 struct trace_seq
*s
= &iter
->seq
;
2675 struct trace_entry
*entry
;
2676 struct trace_event
*event
;
2680 if (trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
2681 if (!trace_seq_printf(s
, "%d %d %llu ",
2682 entry
->pid
, iter
->cpu
, iter
->ts
))
2686 event
= ftrace_find_event(entry
->type
);
2688 return event
->funcs
->raw(iter
, 0, event
);
2690 if (!trace_seq_printf(s
, "%d ?\n", entry
->type
))
2693 return TRACE_TYPE_HANDLED
;
2695 return TRACE_TYPE_PARTIAL_LINE
;
2698 static enum print_line_t
print_hex_fmt(struct trace_iterator
*iter
)
2700 struct trace_seq
*s
= &iter
->seq
;
2701 unsigned char newline
= '\n';
2702 struct trace_entry
*entry
;
2703 struct trace_event
*event
;
2707 if (trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
2708 SEQ_PUT_HEX_FIELD_RET(s
, entry
->pid
);
2709 SEQ_PUT_HEX_FIELD_RET(s
, iter
->cpu
);
2710 SEQ_PUT_HEX_FIELD_RET(s
, iter
->ts
);
2713 event
= ftrace_find_event(entry
->type
);
2715 enum print_line_t ret
= event
->funcs
->hex(iter
, 0, event
);
2716 if (ret
!= TRACE_TYPE_HANDLED
)
2720 SEQ_PUT_FIELD_RET(s
, newline
);
2722 return TRACE_TYPE_HANDLED
;
2725 static enum print_line_t
print_bin_fmt(struct trace_iterator
*iter
)
2727 struct trace_seq
*s
= &iter
->seq
;
2728 struct trace_entry
*entry
;
2729 struct trace_event
*event
;
2733 if (trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
2734 SEQ_PUT_FIELD_RET(s
, entry
->pid
);
2735 SEQ_PUT_FIELD_RET(s
, iter
->cpu
);
2736 SEQ_PUT_FIELD_RET(s
, iter
->ts
);
2739 event
= ftrace_find_event(entry
->type
);
2740 return event
? event
->funcs
->binary(iter
, 0, event
) :
2744 int trace_empty(struct trace_iterator
*iter
)
2746 struct ring_buffer_iter
*buf_iter
;
2749 /* If we are looking at one CPU buffer, only check that one */
2750 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
2751 cpu
= iter
->cpu_file
;
2752 buf_iter
= trace_buffer_iter(iter
, cpu
);
2754 if (!ring_buffer_iter_empty(buf_iter
))
2757 if (!ring_buffer_empty_cpu(iter
->trace_buffer
->buffer
, cpu
))
2763 for_each_tracing_cpu(cpu
) {
2764 buf_iter
= trace_buffer_iter(iter
, cpu
);
2766 if (!ring_buffer_iter_empty(buf_iter
))
2769 if (!ring_buffer_empty_cpu(iter
->trace_buffer
->buffer
, cpu
))
2777 /* Called with trace_event_read_lock() held. */
2778 enum print_line_t
print_trace_line(struct trace_iterator
*iter
)
2780 enum print_line_t ret
;
2782 if (iter
->lost_events
&&
2783 !trace_seq_printf(&iter
->seq
, "CPU:%d [LOST %lu EVENTS]\n",
2784 iter
->cpu
, iter
->lost_events
))
2785 return TRACE_TYPE_PARTIAL_LINE
;
2787 if (iter
->trace
&& iter
->trace
->print_line
) {
2788 ret
= iter
->trace
->print_line(iter
);
2789 if (ret
!= TRACE_TYPE_UNHANDLED
)
2793 if (iter
->ent
->type
== TRACE_BPUTS
&&
2794 trace_flags
& TRACE_ITER_PRINTK
&&
2795 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
2796 return trace_print_bputs_msg_only(iter
);
2798 if (iter
->ent
->type
== TRACE_BPRINT
&&
2799 trace_flags
& TRACE_ITER_PRINTK
&&
2800 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
2801 return trace_print_bprintk_msg_only(iter
);
2803 if (iter
->ent
->type
== TRACE_PRINT
&&
2804 trace_flags
& TRACE_ITER_PRINTK
&&
2805 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
2806 return trace_print_printk_msg_only(iter
);
2808 if (trace_flags
& TRACE_ITER_BIN
)
2809 return print_bin_fmt(iter
);
2811 if (trace_flags
& TRACE_ITER_HEX
)
2812 return print_hex_fmt(iter
);
2814 if (trace_flags
& TRACE_ITER_RAW
)
2815 return print_raw_fmt(iter
);
2817 return print_trace_fmt(iter
);
2820 void trace_latency_header(struct seq_file
*m
)
2822 struct trace_iterator
*iter
= m
->private;
2824 /* print nothing if the buffers are empty */
2825 if (trace_empty(iter
))
2828 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
)
2829 print_trace_header(m
, iter
);
2831 if (!(trace_flags
& TRACE_ITER_VERBOSE
))
2832 print_lat_help_header(m
);
2835 void trace_default_header(struct seq_file
*m
)
2837 struct trace_iterator
*iter
= m
->private;
2839 if (!(trace_flags
& TRACE_ITER_CONTEXT_INFO
))
2842 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
) {
2843 /* print nothing if the buffers are empty */
2844 if (trace_empty(iter
))
2846 print_trace_header(m
, iter
);
2847 if (!(trace_flags
& TRACE_ITER_VERBOSE
))
2848 print_lat_help_header(m
);
2850 if (!(trace_flags
& TRACE_ITER_VERBOSE
)) {
2851 if (trace_flags
& TRACE_ITER_IRQ_INFO
)
2852 print_func_help_header_irq(iter
->trace_buffer
, m
);
2854 print_func_help_header(iter
->trace_buffer
, m
);
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
	seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
	seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
	seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");

	seq_printf(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * ret is 0 if seq_file write succeeded.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
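/*
 * Illustrative sketch, not a copy of trace_create_cpu_file(): judging
 * only from the decode above, the per-CPU files appear to stash
 * "cpu + 1" in i_cdev so that a NULL i_cdev still means "all CPUs".
 * The helper name below is an assumption made up for this note.
 */
static inline void *tracing_encode_cpu(int cpu)
{
	return (void *)(long)(cpu + 1);	/* 0 stays reserved for "all CPUs" */
}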
2966 static const struct seq_operations tracer_seq_ops
= {
2973 static struct trace_iterator
*
2974 __tracing_open(struct inode
*inode
, struct file
*file
, bool snapshot
)
2976 struct trace_array
*tr
= inode
->i_private
;
2977 struct trace_iterator
*iter
;
2980 if (tracing_disabled
)
2981 return ERR_PTR(-ENODEV
);
2983 iter
= __seq_open_private(file
, &tracer_seq_ops
, sizeof(*iter
));
2985 return ERR_PTR(-ENOMEM
);
2987 iter
->buffer_iter
= kzalloc(sizeof(*iter
->buffer_iter
) * num_possible_cpus(),
2989 if (!iter
->buffer_iter
)
2993 * We make a copy of the current tracer to avoid concurrent
2994 * changes on it while we are reading.
2996 mutex_lock(&trace_types_lock
);
2997 iter
->trace
= kzalloc(sizeof(*iter
->trace
), GFP_KERNEL
);
3001 *iter
->trace
= *tr
->current_trace
;
3003 if (!zalloc_cpumask_var(&iter
->started
, GFP_KERNEL
))
3008 #ifdef CONFIG_TRACER_MAX_TRACE
3009 /* Currently only the top directory has a snapshot */
3010 if (tr
->current_trace
->print_max
|| snapshot
)
3011 iter
->trace_buffer
= &tr
->max_buffer
;
3014 iter
->trace_buffer
= &tr
->trace_buffer
;
3015 iter
->snapshot
= snapshot
;
3017 iter
->cpu_file
= tracing_get_cpu(inode
);
3018 mutex_init(&iter
->mutex
);
3020 /* Notify the tracer early; before we stop tracing. */
3021 if (iter
->trace
&& iter
->trace
->open
)
3022 iter
->trace
->open(iter
);
3024 /* Annotate start of buffers if we had overruns */
3025 if (ring_buffer_overruns(iter
->trace_buffer
->buffer
))
3026 iter
->iter_flags
|= TRACE_FILE_ANNOTATE
;
3028 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3029 if (trace_clocks
[tr
->clock_id
].in_ns
)
3030 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
3032 /* stop the trace while dumping if we are not opening "snapshot" */
3033 if (!iter
->snapshot
)
3034 tracing_stop_tr(tr
);
3036 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
) {
3037 for_each_tracing_cpu(cpu
) {
3038 iter
->buffer_iter
[cpu
] =
3039 ring_buffer_read_prepare(iter
->trace_buffer
->buffer
, cpu
);
3041 ring_buffer_read_prepare_sync();
3042 for_each_tracing_cpu(cpu
) {
3043 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
3044 tracing_iter_reset(iter
, cpu
);
3047 cpu
= iter
->cpu_file
;
3048 iter
->buffer_iter
[cpu
] =
3049 ring_buffer_read_prepare(iter
->trace_buffer
->buffer
, cpu
);
3050 ring_buffer_read_prepare_sync();
3051 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
3052 tracing_iter_reset(iter
, cpu
);
3055 mutex_unlock(&trace_types_lock
);
3060 mutex_unlock(&trace_types_lock
);
3062 kfree(iter
->buffer_iter
);
3064 seq_release_private(inode
, file
);
3065 return ERR_PTR(-ENOMEM
);
3068 int tracing_open_generic(struct inode
*inode
, struct file
*filp
)
3070 if (tracing_disabled
)
3073 filp
->private_data
= inode
->i_private
;
3077 bool tracing_is_disabled(void)
3079 return (tracing_disabled
) ? true: false;
3083 * Open and update trace_array ref count.
3084 * Must have the current trace_array passed to it.
3086 static int tracing_open_generic_tr(struct inode
*inode
, struct file
*filp
)
3088 struct trace_array
*tr
= inode
->i_private
;
3090 if (tracing_disabled
)
3093 if (trace_array_get(tr
) < 0)
3096 filp
->private_data
= inode
->i_private
;
3101 static int tracing_release(struct inode
*inode
, struct file
*file
)
3103 struct trace_array
*tr
= inode
->i_private
;
3104 struct seq_file
*m
= file
->private_data
;
3105 struct trace_iterator
*iter
;
3108 if (!(file
->f_mode
& FMODE_READ
)) {
3109 trace_array_put(tr
);
3113 /* Writes do not use seq_file */
3115 mutex_lock(&trace_types_lock
);
3117 for_each_tracing_cpu(cpu
) {
3118 if (iter
->buffer_iter
[cpu
])
3119 ring_buffer_read_finish(iter
->buffer_iter
[cpu
]);
3122 if (iter
->trace
&& iter
->trace
->close
)
3123 iter
->trace
->close(iter
);
3125 if (!iter
->snapshot
)
3126 /* reenable tracing if it was previously enabled */
3127 tracing_start_tr(tr
);
3129 __trace_array_put(tr
);
3131 mutex_unlock(&trace_types_lock
);
3133 mutex_destroy(&iter
->mutex
);
3134 free_cpumask_var(iter
->started
);
3136 kfree(iter
->buffer_iter
);
3137 seq_release_private(inode
, file
);
3142 static int tracing_release_generic_tr(struct inode
*inode
, struct file
*file
)
3144 struct trace_array
*tr
= inode
->i_private
;
3146 trace_array_put(tr
);
3150 static int tracing_single_release_tr(struct inode
*inode
, struct file
*file
)
3152 struct trace_array
*tr
= inode
->i_private
;
3154 trace_array_put(tr
);
3156 return single_release(inode
, file
);
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
3193 * Some tracers are not suitable for instance buffers.
3194 * A tracer is always available for the global array (toplevel)
3195 * or if it explicitly states that it is.
3198 trace_ok_for_array(struct tracer
*t
, struct trace_array
*tr
)
3200 return (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) || t
->allow_instances
;
3203 /* Find the next tracer that this trace array may use */
3204 static struct tracer
*
3205 get_tracer_for_array(struct trace_array
*tr
, struct tracer
*t
)
3207 while (t
&& !trace_ok_for_array(t
, tr
))
3214 t_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
3216 struct trace_array
*tr
= m
->private;
3217 struct tracer
*t
= v
;
3222 t
= get_tracer_for_array(tr
, t
->next
);
3227 static void *t_start(struct seq_file
*m
, loff_t
*pos
)
3229 struct trace_array
*tr
= m
->private;
3233 mutex_lock(&trace_types_lock
);
3235 t
= get_tracer_for_array(tr
, trace_types
);
3236 for (; t
&& l
< *pos
; t
= t_next(m
, t
, &l
))
3242 static void t_stop(struct seq_file
*m
, void *p
)
3244 mutex_unlock(&trace_types_lock
);
3247 static int t_show(struct seq_file
*m
, void *v
)
3249 struct tracer
*t
= v
;
3254 seq_printf(m
, "%s", t
->name
);
3263 static const struct seq_operations show_traces_seq_ops
= {
3270 static int show_traces_open(struct inode
*inode
, struct file
*file
)
3272 struct trace_array
*tr
= inode
->i_private
;
3276 if (tracing_disabled
)
3279 ret
= seq_open(file
, &show_traces_seq_ops
);
3283 m
= file
->private_data
;
3290 tracing_write_stub(struct file
*filp
, const char __user
*ubuf
,
3291 size_t count
, loff_t
*ppos
)
3296 loff_t
tracing_lseek(struct file
*file
, loff_t offset
, int whence
)
3300 if (file
->f_mode
& FMODE_READ
)
3301 ret
= seq_lseek(file
, offset
, whence
);
3303 file
->f_pos
= ret
= 0;
3308 static const struct file_operations tracing_fops
= {
3309 .open
= tracing_open
,
3311 .write
= tracing_write_stub
,
3312 .llseek
= tracing_lseek
,
3313 .release
= tracing_release
,
3316 static const struct file_operations show_traces_fops
= {
3317 .open
= show_traces_open
,
3319 .release
= seq_release
,
3320 .llseek
= seq_lseek
,
/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
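/*
 * Illustrative user-space sketch, not part of this file: writing a hex
 * CPU mask to tracing_cpumask, as handled by tracing_cpumask_write()
 * above (cpumask_parse_user() expects a hexadecimal mask).  The tracefs
 * path is an assumption; adjust it for your system.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/tracing_cpumask", "w");

	if (!f) {
		perror("tracing_cpumask");
		return 1;
	}
	/* "3" limits tracing to CPUs 0 and 1 */
	fputs("3\n", f);
	fclose(f);
	return 0;
}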
3417 static int tracing_trace_options_show(struct seq_file
*m
, void *v
)
3419 struct tracer_opt
*trace_opts
;
3420 struct trace_array
*tr
= m
->private;
3424 mutex_lock(&trace_types_lock
);
3425 tracer_flags
= tr
->current_trace
->flags
->val
;
3426 trace_opts
= tr
->current_trace
->flags
->opts
;
3428 for (i
= 0; trace_options
[i
]; i
++) {
3429 if (trace_flags
& (1 << i
))
3430 seq_printf(m
, "%s\n", trace_options
[i
]);
3432 seq_printf(m
, "no%s\n", trace_options
[i
]);
3435 for (i
= 0; trace_opts
[i
].name
; i
++) {
3436 if (tracer_flags
& trace_opts
[i
].bit
)
3437 seq_printf(m
, "%s\n", trace_opts
[i
].name
);
3439 seq_printf(m
, "no%s\n", trace_opts
[i
].name
);
3441 mutex_unlock(&trace_types_lock
);
3446 static int __set_tracer_option(struct trace_array
*tr
,
3447 struct tracer_flags
*tracer_flags
,
3448 struct tracer_opt
*opts
, int neg
)
3450 struct tracer
*trace
= tr
->current_trace
;
3453 ret
= trace
->set_flag(tr
, tracer_flags
->val
, opts
->bit
, !neg
);
3458 tracer_flags
->val
&= ~opts
->bit
;
3460 tracer_flags
->val
|= opts
->bit
;
3464 /* Try to assign a tracer specific option */
3465 static int set_tracer_option(struct trace_array
*tr
, char *cmp
, int neg
)
3467 struct tracer
*trace
= tr
->current_trace
;
3468 struct tracer_flags
*tracer_flags
= trace
->flags
;
3469 struct tracer_opt
*opts
= NULL
;
3472 for (i
= 0; tracer_flags
->opts
[i
].name
; i
++) {
3473 opts
= &tracer_flags
->opts
[i
];
3475 if (strcmp(cmp
, opts
->name
) == 0)
3476 return __set_tracer_option(tr
, trace
->flags
, opts
, neg
);
3482 /* Some tracers require overwrite to stay enabled */
3483 int trace_keep_overwrite(struct tracer
*tracer
, u32 mask
, int set
)
3485 if (tracer
->enabled
&& (mask
& TRACE_ITER_OVERWRITE
) && !set
)
3491 int set_tracer_flag(struct trace_array
*tr
, unsigned int mask
, int enabled
)
3493 /* do nothing if flag is already set */
3494 if (!!(trace_flags
& mask
) == !!enabled
)
3497 /* Give the tracer a chance to approve the change */
3498 if (tr
->current_trace
->flag_changed
)
3499 if (tr
->current_trace
->flag_changed(tr
, mask
, !!enabled
))
3503 trace_flags
|= mask
;
3505 trace_flags
&= ~mask
;
3507 if (mask
== TRACE_ITER_RECORD_CMD
)
3508 trace_event_enable_cmd_record(enabled
);
3510 if (mask
== TRACE_ITER_OVERWRITE
) {
3511 ring_buffer_change_overwrite(tr
->trace_buffer
.buffer
, enabled
);
3512 #ifdef CONFIG_TRACER_MAX_TRACE
3513 ring_buffer_change_overwrite(tr
->max_buffer
.buffer
, enabled
);
3517 if (mask
== TRACE_ITER_PRINTK
)
3518 trace_printk_start_stop_comm(enabled
);
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
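/*
 * Illustrative user-space sketch, not part of this file: toggling one
 * of the global options parsed by trace_set_options() above.  The
 * "print-parent" option name and the tracefs path are assumptions;
 * read trace_options on your kernel first to see what it exposes.
 */
#include <stdio.h>

static int write_option(const char *opt)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/trace_options", "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", opt);	/* prefix with "no" to clear the flag */
	fclose(f);
	return 0;
}

int main(void)
{
	write_option("noprint-parent");	/* clear the flag */
	write_option("print-parent");	/* set it again */
	return 0;
}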
3607 static const char readme_msg
[] =
3608 "tracing mini-HOWTO:\n\n"
3609 "# echo 0 > tracing_on : quick way to disable tracing\n"
3610 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3611 " Important files:\n"
3612 " trace\t\t\t- The static contents of the buffer\n"
3613 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3614 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3615 " current_tracer\t- function and latency tracers\n"
3616 " available_tracers\t- list of configured tracers for current_tracer\n"
3617 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3618 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3619 " trace_clock\t\t-change the clock used to order events\n"
3620 " local: Per cpu clock but may not be synced across CPUs\n"
3621 " global: Synced across CPUs but slows tracing down.\n"
3622 " counter: Not a clock, but just an increment\n"
3623 " uptime: Jiffy counter from time of boot\n"
3624 " perf: Same clock that perf events use\n"
3625 #ifdef CONFIG_X86_64
3626 " x86-tsc: TSC cycle counter\n"
3628 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3629 " tracing_cpumask\t- Limit which CPUs to trace\n"
3630 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3631 "\t\t\t Remove sub-buffer with rmdir\n"
3632 " trace_options\t\t- Set format or modify how tracing happens\n"
3633 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3634 "\t\t\t option name\n"
3635 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3636 #ifdef CONFIG_DYNAMIC_FTRACE
3637 "\n available_filter_functions - list of functions that can be filtered on\n"
3638 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3639 "\t\t\t functions\n"
3640 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3641 "\t modules: Can select a group via module\n"
3642 "\t Format: :mod:<module-name>\n"
3643 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3644 "\t triggers: a command to perform when function is hit\n"
3645 "\t Format: <function>:<trigger>[:count]\n"
3646 "\t trigger: traceon, traceoff\n"
3647 "\t\t enable_event:<system>:<event>\n"
3648 "\t\t disable_event:<system>:<event>\n"
3649 #ifdef CONFIG_STACKTRACE
3652 #ifdef CONFIG_TRACER_SNAPSHOT
3657 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3658 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3659 "\t The first one will disable tracing every time do_fault is hit\n"
3660 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3661 "\t The first time do trap is hit and it disables tracing, the\n"
3662 "\t counter will decrement to 2. If tracing is already disabled,\n"
3663 "\t the counter will not decrement. It only decrements when the\n"
3664 "\t trigger did work\n"
3665 "\t To remove trigger without count:\n"
3666 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3667 "\t To remove trigger with a count:\n"
3668 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3669 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3670 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3671 "\t modules: Can select a group via module command :mod:\n"
3672 "\t Does not accept triggers\n"
3673 #endif /* CONFIG_DYNAMIC_FTRACE */
3674 #ifdef CONFIG_FUNCTION_TRACER
3675 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3678 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3679 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3680 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3681 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3683 #ifdef CONFIG_TRACER_SNAPSHOT
3684 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3685 "\t\t\t snapshot buffer. Read the contents for more\n"
3686 "\t\t\t information\n"
3688 #ifdef CONFIG_STACK_TRACER
3689 " stack_trace\t\t- Shows the max stack trace when active\n"
3690 " stack_max_size\t- Shows current max stack size that was traced\n"
3691 "\t\t\t Write into this file to reset the max size (trigger a\n"
3692 "\t\t\t new trace)\n"
3693 #ifdef CONFIG_DYNAMIC_FTRACE
3694 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3697 #endif /* CONFIG_STACK_TRACER */
3698 " events/\t\t- Directory containing all trace event subsystems:\n"
3699 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3700 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3701 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3703 " filter\t\t- If set, only events passing filter are traced\n"
3704 " events/<system>/<event>/\t- Directory containing control files for\n"
3706 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3707 " filter\t\t- If set, only events passing filter are traced\n"
3708 " trigger\t\t- If set, a command to perform when event is hit\n"
3709 "\t Format: <trigger>[:count][if <filter>]\n"
3710 "\t trigger: traceon, traceoff\n"
3711 "\t enable_event:<system>:<event>\n"
3712 "\t disable_event:<system>:<event>\n"
3713 #ifdef CONFIG_STACKTRACE
3716 #ifdef CONFIG_TRACER_SNAPSHOT
3719 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3720 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3721 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3722 "\t events/block/block_unplug/trigger\n"
3723 "\t The first disables tracing every time block_unplug is hit.\n"
3724 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3725 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3726 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3727 "\t Like function triggers, the counter is only decremented if it\n"
3728 "\t enabled or disabled tracing.\n"
3729 "\t To remove a trigger without a count:\n"
3730 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3731 "\t To remove a trigger with a count:\n"
3732 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3733 "\t Filters can be ignored when removing a trigger.\n"
3737 tracing_readme_read(struct file
*filp
, char __user
*ubuf
,
3738 size_t cnt
, loff_t
*ppos
)
3740 return simple_read_from_buffer(ubuf
, cnt
, ppos
,
3741 readme_msg
, strlen(readme_msg
));
3744 static const struct file_operations tracing_readme_fops
= {
3745 .open
= tracing_open_generic
,
3746 .read
= tracing_readme_read
,
3747 .llseek
= generic_file_llseek
,
3750 static void *saved_cmdlines_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
3752 unsigned int *ptr
= v
;
3754 if (*pos
|| m
->count
)
3759 for (; ptr
< &savedcmd
->map_cmdline_to_pid
[savedcmd
->cmdline_num
];
3761 if (*ptr
== -1 || *ptr
== NO_CMDLINE_MAP
)
3770 static void *saved_cmdlines_start(struct seq_file
*m
, loff_t
*pos
)
3776 arch_spin_lock(&trace_cmdline_lock
);
3778 v
= &savedcmd
->map_cmdline_to_pid
[0];
3780 v
= saved_cmdlines_next(m
, v
, &l
);
3788 static void saved_cmdlines_stop(struct seq_file
*m
, void *v
)
3790 arch_spin_unlock(&trace_cmdline_lock
);
3794 static int saved_cmdlines_show(struct seq_file
*m
, void *v
)
3796 char buf
[TASK_COMM_LEN
];
3797 unsigned int *pid
= v
;
3799 __trace_find_cmdline(*pid
, buf
);
3800 seq_printf(m
, "%d %s\n", *pid
, buf
);
3804 static const struct seq_operations tracing_saved_cmdlines_seq_ops
= {
3805 .start
= saved_cmdlines_start
,
3806 .next
= saved_cmdlines_next
,
3807 .stop
= saved_cmdlines_stop
,
3808 .show
= saved_cmdlines_show
,
3811 static int tracing_saved_cmdlines_open(struct inode
*inode
, struct file
*filp
)
3813 if (tracing_disabled
)
3816 return seq_open(filp
, &tracing_saved_cmdlines_seq_ops
);
3819 static const struct file_operations tracing_saved_cmdlines_fops
= {
3820 .open
= tracing_saved_cmdlines_open
,
3822 .llseek
= seq_lseek
,
3823 .release
= seq_release
,
3827 tracing_saved_cmdlines_size_read(struct file
*filp
, char __user
*ubuf
,
3828 size_t cnt
, loff_t
*ppos
)
3833 arch_spin_lock(&trace_cmdline_lock
);
3834 r
= scnprintf(buf
, sizeof(buf
), "%u\n", savedcmd
->cmdline_num
);
3835 arch_spin_unlock(&trace_cmdline_lock
);
3837 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
3840 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer
*s
)
3842 kfree(s
->saved_cmdlines
);
3843 kfree(s
->map_cmdline_to_pid
);
3847 static int tracing_resize_saved_cmdlines(unsigned int val
)
3849 struct saved_cmdlines_buffer
*s
, *savedcmd_temp
;
3851 s
= kmalloc(sizeof(*s
), GFP_KERNEL
);
3855 if (allocate_cmdlines_buffer(val
, s
) < 0) {
3860 arch_spin_lock(&trace_cmdline_lock
);
3861 savedcmd_temp
= savedcmd
;
3863 arch_spin_unlock(&trace_cmdline_lock
);
3864 free_saved_cmdlines_buffer(savedcmd_temp
);
3870 tracing_saved_cmdlines_size_write(struct file
*filp
, const char __user
*ubuf
,
3871 size_t cnt
, loff_t
*ppos
)
3876 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
3880 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
3881 if (!val
|| val
> PID_MAX_DEFAULT
)
3884 ret
= tracing_resize_saved_cmdlines((unsigned int)val
);
3893 static const struct file_operations tracing_saved_cmdlines_size_fops
= {
3894 .open
= tracing_open_generic
,
3895 .read
= tracing_saved_cmdlines_size_read
,
3896 .write
= tracing_saved_cmdlines_size_write
,
3900 tracing_set_trace_read(struct file
*filp
, char __user
*ubuf
,
3901 size_t cnt
, loff_t
*ppos
)
3903 struct trace_array
*tr
= filp
->private_data
;
3904 char buf
[MAX_TRACER_SIZE
+2];
3907 mutex_lock(&trace_types_lock
);
3908 r
= sprintf(buf
, "%s\n", tr
->current_trace
->name
);
3909 mutex_unlock(&trace_types_lock
);
3911 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
3914 int tracer_init(struct tracer
*t
, struct trace_array
*tr
)
3916 tracing_reset_online_cpus(&tr
->trace_buffer
);
3920 static void set_buffer_entries(struct trace_buffer
*buf
, unsigned long val
)
3924 for_each_tracing_cpu(cpu
)
3925 per_cpu_ptr(buf
->data
, cpu
)->entries
= val
;
3928 #ifdef CONFIG_TRACER_MAX_TRACE
3929 /* resize @tr's buffer to the size of @size_tr's entries */
3930 static int resize_buffer_duplicate_size(struct trace_buffer
*trace_buf
,
3931 struct trace_buffer
*size_buf
, int cpu_id
)
3935 if (cpu_id
== RING_BUFFER_ALL_CPUS
) {
3936 for_each_tracing_cpu(cpu
) {
3937 ret
= ring_buffer_resize(trace_buf
->buffer
,
3938 per_cpu_ptr(size_buf
->data
, cpu
)->entries
, cpu
);
3941 per_cpu_ptr(trace_buf
->data
, cpu
)->entries
=
3942 per_cpu_ptr(size_buf
->data
, cpu
)->entries
;
3945 ret
= ring_buffer_resize(trace_buf
->buffer
,
3946 per_cpu_ptr(size_buf
->data
, cpu_id
)->entries
, cpu_id
);
3948 per_cpu_ptr(trace_buf
->data
, cpu_id
)->entries
=
3949 per_cpu_ptr(size_buf
->data
, cpu_id
)->entries
;
3954 #endif /* CONFIG_TRACER_MAX_TRACE */
3956 static int __tracing_resize_ring_buffer(struct trace_array
*tr
,
3957 unsigned long size
, int cpu
)
3962 * If kernel or user changes the size of the ring buffer
3963 * we use the size that was given, and we can forget about
3964 * expanding it later.
3966 ring_buffer_expanded
= true;
3968 /* May be called before buffers are initialized */
3969 if (!tr
->trace_buffer
.buffer
)
3972 ret
= ring_buffer_resize(tr
->trace_buffer
.buffer
, size
, cpu
);
3976 #ifdef CONFIG_TRACER_MAX_TRACE
3977 if (!(tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) ||
3978 !tr
->current_trace
->use_max_tr
)
3981 ret
= ring_buffer_resize(tr
->max_buffer
.buffer
, size
, cpu
);
3983 int r
= resize_buffer_duplicate_size(&tr
->trace_buffer
,
3984 &tr
->trace_buffer
, cpu
);
3987 * AARGH! We are left with different
3988 * size max buffer!!!!
3989 * The max buffer is our "snapshot" buffer.
3990 * When a tracer needs a snapshot (one of the
3991 * latency tracers), it swaps the max buffer
3992 * with the saved snap shot. We succeeded to
3993 * update the size of the main buffer, but failed to
3994 * update the size of the max buffer. But when we tried
3995 * to reset the main buffer to the original size, we
3996 * failed there too. This is very unlikely to
3997 * happen, but if it does, warn and kill all
4001 tracing_disabled
= 1;
4006 if (cpu
== RING_BUFFER_ALL_CPUS
)
4007 set_buffer_entries(&tr
->max_buffer
, size
);
4009 per_cpu_ptr(tr
->max_buffer
.data
, cpu
)->entries
= size
;
4012 #endif /* CONFIG_TRACER_MAX_TRACE */
4014 if (cpu
== RING_BUFFER_ALL_CPUS
)
4015 set_buffer_entries(&tr
->trace_buffer
, size
);
4017 per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
= size
;
4022 static ssize_t
tracing_resize_ring_buffer(struct trace_array
*tr
,
4023 unsigned long size
, int cpu_id
)
4027 mutex_lock(&trace_types_lock
);
4029 if (cpu_id
!= RING_BUFFER_ALL_CPUS
) {
4030 /* make sure, this cpu is enabled in the mask */
4031 if (!cpumask_test_cpu(cpu_id
, tracing_buffer_mask
)) {
4037 ret
= __tracing_resize_ring_buffer(tr
, size
, cpu_id
);
4042 mutex_unlock(&trace_types_lock
);
4049 * tracing_update_buffers - used by tracing facility to expand ring buffers
4051 * To save on memory when the tracing is never used on a system with it
4052 * configured in. The ring buffers are set to a minimum size. But once
4053 * a user starts to use the tracing facility, then they need to grow
4054 * to their default size.
4056 * This function is to be called when a tracer is about to be used.
4058 int tracing_update_buffers(void)
4062 mutex_lock(&trace_types_lock
);
4063 if (!ring_buffer_expanded
)
4064 ret
= __tracing_resize_ring_buffer(&global_trace
, trace_buf_size
,
4065 RING_BUFFER_ALL_CPUS
);
4066 mutex_unlock(&trace_types_lock
);
4071 struct trace_option_dentry
;
4073 static struct trace_option_dentry
*
4074 create_trace_option_files(struct trace_array
*tr
, struct tracer
*tracer
);
4077 destroy_trace_option_files(struct trace_option_dentry
*topts
);
4080 * Used to clear out the tracer before deletion of an instance.
4081 * Must have trace_types_lock held.
4083 static void tracing_set_nop(struct trace_array
*tr
)
4085 if (tr
->current_trace
== &nop_trace
)
4088 tr
->current_trace
->enabled
--;
4090 if (tr
->current_trace
->reset
)
4091 tr
->current_trace
->reset(tr
);
4093 tr
->current_trace
= &nop_trace
;
4096 static int tracing_set_tracer(struct trace_array
*tr
, const char *buf
)
4098 static struct trace_option_dentry
*topts
;
4100 #ifdef CONFIG_TRACER_MAX_TRACE
4105 mutex_lock(&trace_types_lock
);
4107 if (!ring_buffer_expanded
) {
4108 ret
= __tracing_resize_ring_buffer(tr
, trace_buf_size
,
4109 RING_BUFFER_ALL_CPUS
);
4115 for (t
= trace_types
; t
; t
= t
->next
) {
4116 if (strcmp(t
->name
, buf
) == 0)
4123 if (t
== tr
->current_trace
)
4126 /* Some tracers are only allowed for the top level buffer */
4127 if (!trace_ok_for_array(t
, tr
)) {
4132 trace_branch_disable();
4134 tr
->current_trace
->enabled
--;
4136 if (tr
->current_trace
->reset
)
4137 tr
->current_trace
->reset(tr
);
4139 /* Current trace needs to be nop_trace before synchronize_sched */
4140 tr
->current_trace
= &nop_trace
;
4142 #ifdef CONFIG_TRACER_MAX_TRACE
4143 had_max_tr
= tr
->allocated_snapshot
;
4145 if (had_max_tr
&& !t
->use_max_tr
) {
4147 * We need to make sure that the update_max_tr sees that
4148 * current_trace changed to nop_trace to keep it from
4149 * swapping the buffers after we resize it.
4150 * The update_max_tr is called from interrupts disabled
4151 * so a synchronized_sched() is sufficient.
4153 synchronize_sched();
4157 /* Currently, only the top instance has options */
4158 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) {
4159 destroy_trace_option_files(topts
);
4160 topts
= create_trace_option_files(tr
, t
);
4163 #ifdef CONFIG_TRACER_MAX_TRACE
4164 if (t
->use_max_tr
&& !had_max_tr
) {
4165 ret
= alloc_snapshot(tr
);
4172 ret
= tracer_init(t
, tr
);
4177 tr
->current_trace
= t
;
4178 tr
->current_trace
->enabled
++;
4179 trace_branch_enable(tr
);
4181 mutex_unlock(&trace_types_lock
);
4187 tracing_set_trace_write(struct file
*filp
, const char __user
*ubuf
,
4188 size_t cnt
, loff_t
*ppos
)
4190 struct trace_array
*tr
= filp
->private_data
;
4191 char buf
[MAX_TRACER_SIZE
+1];
4198 if (cnt
> MAX_TRACER_SIZE
)
4199 cnt
= MAX_TRACER_SIZE
;
4201 if (copy_from_user(&buf
, ubuf
, cnt
))
4206 /* strip ending whitespace. */
4207 for (i
= cnt
- 1; i
> 0 && isspace(buf
[i
]); i
--)
4210 err
= tracing_set_tracer(tr
, buf
);
4220 tracing_nsecs_read(unsigned long *ptr
, char __user
*ubuf
,
4221 size_t cnt
, loff_t
*ppos
)
4226 r
= snprintf(buf
, sizeof(buf
), "%ld\n",
4227 *ptr
== (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr
));
4228 if (r
> sizeof(buf
))
4230 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
4234 tracing_nsecs_write(unsigned long *ptr
, const char __user
*ubuf
,
4235 size_t cnt
, loff_t
*ppos
)
4240 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
4250 tracing_thresh_read(struct file
*filp
, char __user
*ubuf
,
4251 size_t cnt
, loff_t
*ppos
)
4253 return tracing_nsecs_read(&tracing_thresh
, ubuf
, cnt
, ppos
);
4257 tracing_thresh_write(struct file
*filp
, const char __user
*ubuf
,
4258 size_t cnt
, loff_t
*ppos
)
4260 struct trace_array
*tr
= filp
->private_data
;
4263 mutex_lock(&trace_types_lock
);
4264 ret
= tracing_nsecs_write(&tracing_thresh
, ubuf
, cnt
, ppos
);
4268 if (tr
->current_trace
->update_thresh
) {
4269 ret
= tr
->current_trace
->update_thresh(tr
);
4276 mutex_unlock(&trace_types_lock
);
4282 tracing_max_lat_read(struct file
*filp
, char __user
*ubuf
,
4283 size_t cnt
, loff_t
*ppos
)
4285 return tracing_nsecs_read(filp
->private_data
, ubuf
, cnt
, ppos
);
4289 tracing_max_lat_write(struct file
*filp
, const char __user
*ubuf
,
4290 size_t cnt
, loff_t
*ppos
)
4292 return tracing_nsecs_write(filp
->private_data
, ubuf
, cnt
, ppos
);
4295 static int tracing_open_pipe(struct inode
*inode
, struct file
*filp
)
4297 struct trace_array
*tr
= inode
->i_private
;
4298 struct trace_iterator
*iter
;
4301 if (tracing_disabled
)
4304 if (trace_array_get(tr
) < 0)
4307 mutex_lock(&trace_types_lock
);
4309 /* create a buffer to store the information to pass to userspace */
4310 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
4313 __trace_array_put(tr
);
4318 * We make a copy of the current tracer to avoid concurrent
4319 * changes on it while we are reading.
4321 iter
->trace
= kmalloc(sizeof(*iter
->trace
), GFP_KERNEL
);
4326 *iter
->trace
= *tr
->current_trace
;
4328 if (!alloc_cpumask_var(&iter
->started
, GFP_KERNEL
)) {
4333 /* trace pipe does not show start of buffer */
4334 cpumask_setall(iter
->started
);
4336 if (trace_flags
& TRACE_ITER_LATENCY_FMT
)
4337 iter
->iter_flags
|= TRACE_FILE_LAT_FMT
;
4339 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4340 if (trace_clocks
[tr
->clock_id
].in_ns
)
4341 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
4344 iter
->trace_buffer
= &tr
->trace_buffer
;
4345 iter
->cpu_file
= tracing_get_cpu(inode
);
4346 mutex_init(&iter
->mutex
);
4347 filp
->private_data
= iter
;
4349 if (iter
->trace
->pipe_open
)
4350 iter
->trace
->pipe_open(iter
);
4352 nonseekable_open(inode
, filp
);
4354 mutex_unlock(&trace_types_lock
);
4360 __trace_array_put(tr
);
4361 mutex_unlock(&trace_types_lock
);
4365 static int tracing_release_pipe(struct inode
*inode
, struct file
*file
)
4367 struct trace_iterator
*iter
= file
->private_data
;
4368 struct trace_array
*tr
= inode
->i_private
;
4370 mutex_lock(&trace_types_lock
);
4372 if (iter
->trace
->pipe_close
)
4373 iter
->trace
->pipe_close(iter
);
4375 mutex_unlock(&trace_types_lock
);
4377 free_cpumask_var(iter
->started
);
4378 mutex_destroy(&iter
->mutex
);
4382 trace_array_put(tr
);
4388 trace_poll(struct trace_iterator
*iter
, struct file
*filp
, poll_table
*poll_table
)
4390 /* Iterators are static, they should be filled or empty */
4391 if (trace_buffer_iter(iter
, iter
->cpu_file
))
4392 return POLLIN
| POLLRDNORM
;
4394 if (trace_flags
& TRACE_ITER_BLOCK
)
4396 * Always select as readable when in blocking mode
4398 return POLLIN
| POLLRDNORM
;
4400 return ring_buffer_poll_wait(iter
->trace_buffer
->buffer
, iter
->cpu_file
,
4405 tracing_poll_pipe(struct file
*filp
, poll_table
*poll_table
)
4407 struct trace_iterator
*iter
= filp
->private_data
;
4409 return trace_poll(iter
, filp
, poll_table
);
4412 /* Must be called with trace_types_lock mutex held. */
4413 static int tracing_wait_pipe(struct file
*filp
)
4415 struct trace_iterator
*iter
= filp
->private_data
;
4418 while (trace_empty(iter
)) {
4420 if ((filp
->f_flags
& O_NONBLOCK
)) {
4425 * We block until we read something and tracing is disabled.
4426 * We still block if tracing is disabled, but we have never
4427 * read anything. This allows a user to cat this file, and
4428 * then enable tracing. But after we have read something,
4429 * we give an EOF when tracing is again disabled.
4431 * iter->pos will be 0 if we haven't read anything.
4433 if (!tracing_is_on() && iter
->pos
)
4436 mutex_unlock(&iter
->mutex
);
4438 ret
= wait_on_pipe(iter
, false);
4440 mutex_lock(&iter
->mutex
);
4453 tracing_read_pipe(struct file
*filp
, char __user
*ubuf
,
4454 size_t cnt
, loff_t
*ppos
)
4456 struct trace_iterator
*iter
= filp
->private_data
;
4457 struct trace_array
*tr
= iter
->tr
;
4460 /* return any leftover data */
4461 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
4465 trace_seq_init(&iter
->seq
);
4467 /* copy the tracer to avoid using a global lock all around */
4468 mutex_lock(&trace_types_lock
);
4469 if (unlikely(iter
->trace
->name
!= tr
->current_trace
->name
))
4470 *iter
->trace
= *tr
->current_trace
;
4471 mutex_unlock(&trace_types_lock
);
4474 * Avoid more than one consumer on a single file descriptor
4475 * This is just a matter of traces coherency, the ring buffer itself
4478 mutex_lock(&iter
->mutex
);
4479 if (iter
->trace
->read
) {
4480 sret
= iter
->trace
->read(iter
, filp
, ubuf
, cnt
, ppos
);
4486 sret
= tracing_wait_pipe(filp
);
4490 /* stop when tracing is finished */
4491 if (trace_empty(iter
)) {
4496 if (cnt
>= PAGE_SIZE
)
4497 cnt
= PAGE_SIZE
- 1;
4499 /* reset all but tr, trace, and overruns */
4500 memset(&iter
->seq
, 0,
4501 sizeof(struct trace_iterator
) -
4502 offsetof(struct trace_iterator
, seq
));
4503 cpumask_clear(iter
->started
);
4506 trace_event_read_lock();
4507 trace_access_lock(iter
->cpu_file
);
4508 while (trace_find_next_entry_inc(iter
) != NULL
) {
4509 enum print_line_t ret
;
4510 int len
= iter
->seq
.len
;
4512 ret
= print_trace_line(iter
);
4513 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
4514 /* don't print partial lines */
4515 iter
->seq
.len
= len
;
4518 if (ret
!= TRACE_TYPE_NO_CONSUME
)
4519 trace_consume(iter
);
4521 if (iter
->seq
.len
>= cnt
)
4525 * Setting the full flag means we reached the trace_seq buffer
4526 * size and we should leave by partial output condition above.
4527 * One of the trace_seq_* functions is not used properly.
4529 WARN_ONCE(iter
->seq
.full
, "full flag set for trace type %d",
4532 trace_access_unlock(iter
->cpu_file
);
4533 trace_event_read_unlock();
4535 /* Now copy what we have to the user */
4536 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
4537 if (iter
->seq
.readpos
>= iter
->seq
.len
)
4538 trace_seq_init(&iter
->seq
);
4541 * If there was nothing to send to user, in spite of consuming trace
4542 * entries, go back to wait for more entries.
4548 mutex_unlock(&iter
->mutex
);
4553 static void tracing_spd_release_pipe(struct splice_pipe_desc
*spd
,
4556 __free_page(spd
->pages
[idx
]);
4559 static const struct pipe_buf_operations tracing_pipe_buf_ops
= {
4561 .confirm
= generic_pipe_buf_confirm
,
4562 .release
= generic_pipe_buf_release
,
4563 .steal
= generic_pipe_buf_steal
,
4564 .get
= generic_pipe_buf_get
,
4568 tracing_fill_pipe_page(size_t rem
, struct trace_iterator
*iter
)
4573 /* Seq buffer is page-sized, exactly what we need. */
4575 count
= iter
->seq
.len
;
4576 ret
= print_trace_line(iter
);
4577 count
= iter
->seq
.len
- count
;
4580 iter
->seq
.len
-= count
;
4583 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
4584 iter
->seq
.len
-= count
;
4588 if (ret
!= TRACE_TYPE_NO_CONSUME
)
4589 trace_consume(iter
);
4591 if (!trace_find_next_entry_inc(iter
)) {
4601 static ssize_t
tracing_splice_read_pipe(struct file
*filp
,
4603 struct pipe_inode_info
*pipe
,
4607 struct page
*pages_def
[PIPE_DEF_BUFFERS
];
4608 struct partial_page partial_def
[PIPE_DEF_BUFFERS
];
4609 struct trace_iterator
*iter
= filp
->private_data
;
4610 struct splice_pipe_desc spd
= {
4612 .partial
= partial_def
,
4613 .nr_pages
= 0, /* This gets updated below. */
4614 .nr_pages_max
= PIPE_DEF_BUFFERS
,
4616 .ops
= &tracing_pipe_buf_ops
,
4617 .spd_release
= tracing_spd_release_pipe
,
4619 struct trace_array
*tr
= iter
->tr
;
4624 if (splice_grow_spd(pipe
, &spd
))
4627 /* copy the tracer to avoid using a global lock all around */
4628 mutex_lock(&trace_types_lock
);
4629 if (unlikely(iter
->trace
->name
!= tr
->current_trace
->name
))
4630 *iter
->trace
= *tr
->current_trace
;
4631 mutex_unlock(&trace_types_lock
);
4633 mutex_lock(&iter
->mutex
);
4635 if (iter
->trace
->splice_read
) {
4636 ret
= iter
->trace
->splice_read(iter
, filp
,
4637 ppos
, pipe
, len
, flags
);
4642 ret
= tracing_wait_pipe(filp
);
4646 if (!iter
->ent
&& !trace_find_next_entry_inc(iter
)) {
4651 trace_event_read_lock();
4652 trace_access_lock(iter
->cpu_file
);
4654 /* Fill as many pages as possible. */
4655 for (i
= 0, rem
= len
; i
< spd
.nr_pages_max
&& rem
; i
++) {
4656 spd
.pages
[i
] = alloc_page(GFP_KERNEL
);
4660 rem
= tracing_fill_pipe_page(rem
, iter
);
4662 /* Copy the data into the page, so we can start over. */
4663 ret
= trace_seq_to_buffer(&iter
->seq
,
4664 page_address(spd
.pages
[i
]),
4667 __free_page(spd
.pages
[i
]);
4670 spd
.partial
[i
].offset
= 0;
4671 spd
.partial
[i
].len
= iter
->seq
.len
;
4673 trace_seq_init(&iter
->seq
);
4676 trace_access_unlock(iter
->cpu_file
);
4677 trace_event_read_unlock();
4678 mutex_unlock(&iter
->mutex
);
4682 ret
= splice_to_pipe(pipe
, &spd
);
4684 splice_shrink_spd(&spd
);
4688 mutex_unlock(&iter
->mutex
);
4693 tracing_entries_read(struct file
*filp
, char __user
*ubuf
,
4694 size_t cnt
, loff_t
*ppos
)
4696 struct inode
*inode
= file_inode(filp
);
4697 struct trace_array
*tr
= inode
->i_private
;
4698 int cpu
= tracing_get_cpu(inode
);
4703 mutex_lock(&trace_types_lock
);
4705 if (cpu
== RING_BUFFER_ALL_CPUS
) {
4706 int cpu
, buf_size_same
;
4711 /* check if all cpu sizes are same */
4712 for_each_tracing_cpu(cpu
) {
4713 /* fill in the size from first enabled cpu */
4715 size
= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
;
4716 if (size
!= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
) {
4722 if (buf_size_same
) {
4723 if (!ring_buffer_expanded
)
4724 r
= sprintf(buf
, "%lu (expanded: %lu)\n",
4726 trace_buf_size
>> 10);
4728 r
= sprintf(buf
, "%lu\n", size
>> 10);
4730 r
= sprintf(buf
, "X\n");
4732 r
= sprintf(buf
, "%lu\n", per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
>> 10);
4734 mutex_unlock(&trace_types_lock
);
4736 ret
= simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
4741 tracing_entries_write(struct file
*filp
, const char __user
*ubuf
,
4742 size_t cnt
, loff_t
*ppos
)
4744 struct inode
*inode
= file_inode(filp
);
4745 struct trace_array
*tr
= inode
->i_private
;
4749 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
4753 /* must have at least 1 entry */
4757 /* value is in KB */
4759 ret
= tracing_resize_ring_buffer(tr
, val
, tracing_get_cpu(inode
));
4769 tracing_total_entries_read(struct file
*filp
, char __user
*ubuf
,
4770 size_t cnt
, loff_t
*ppos
)
4772 struct trace_array
*tr
= filp
->private_data
;
4775 unsigned long size
= 0, expanded_size
= 0;
4777 mutex_lock(&trace_types_lock
);
4778 for_each_tracing_cpu(cpu
) {
4779 size
+= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
>> 10;
4780 if (!ring_buffer_expanded
)
4781 expanded_size
+= trace_buf_size
>> 10;
4783 if (ring_buffer_expanded
)
4784 r
= sprintf(buf
, "%lu\n", size
);
4786 r
= sprintf(buf
, "%lu (expanded: %lu)\n", size
, expanded_size
);
4787 mutex_unlock(&trace_types_lock
);
4789 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
4793 tracing_free_buffer_write(struct file
*filp
, const char __user
*ubuf
,
4794 size_t cnt
, loff_t
*ppos
)
4797 * There is no need to read what the user has written, this function
4798 * is just to make sure that there is no error when "echo" is used
4807 tracing_free_buffer_release(struct inode
*inode
, struct file
*filp
)
4809 struct trace_array
*tr
= inode
->i_private
;
4811 /* disable tracing ? */
4812 if (trace_flags
& TRACE_ITER_STOP_ON_FREE
)
4813 tracer_tracing_off(tr
);
4814 /* resize the ring buffer to 0 */
4815 tracing_resize_ring_buffer(tr
, 0, RING_BUFFER_ALL_CPUS
);
4817 trace_array_put(tr
);
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];

	if (tracing_disabled)
		return -EINVAL;

	if (!(trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory,
	 * which most likely it already is, because the caller just
	 * referenced it.  But there's no guarantee that it is.  By using
	 * get_user_pages_fast() and kmap_atomic/kunmap_atomic() we can get
	 * access to the pages directly.  We then write the data directly
	 * into the ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());

	/* Ring buffer disabled, return as if not open for write */

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	for (i = 0; i < nr_pages; i++) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
}
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			   "%s%s%s%s", i ? " " : "",
			   i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			   i == tr->clock_id ? "]" : "");

	return 0;
}

static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		file->private_data = m;
	}

	if (ret < 0)
		trace_array_put(tr);
}
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	ret = tracing_update_buffers();

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
		/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

 out:
	mutex_unlock(&trace_types_lock);
}

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
					   struct pipe_inode_info *pipe,
					   size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;

	ret = tracing_buffers_open(inode, filp);

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;
}

#endif /* CONFIG_TRACER_SNAPSHOT */
static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;

	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare, count, iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (trace_empty(iter)) {
		if ((filp->f_flags & O_NONBLOCK)) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		mutex_unlock(&trace_types_lock);
		ret = wait_on_pipe(iter, false);
		mutex_lock(&trace_types_lock);
	}

 read:
	size = PAGE_SIZE - info->read;

	ret = copy_to_user(ubuf, info->spare + info->read, size);

 out_unlock:
	mutex_unlock(&trace_types_lock);
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);

	mutex_unlock(&trace_types_lock);
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};
static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ring_buffer_free_read_page(ref->buffer, ref->page);
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.confirm	= generic_pipe_buf_confirm,
	.release	= buffer_pipe_buf_release,
	.steal		= generic_pipe_buf_steal,
	.get		= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	ring_buffer_free_read_page(ref->buffer, ref->page);

	spd->partial[i].private = 0;
}
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		ret = wait_on_pipe(iter, true);
		mutex_lock(&trace_types_lock);
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
 out:
	mutex_unlock(&trace_types_lock);
}
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)

static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func		= ftrace_snapshot,
	.print		= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func		= ftrace_count_snapshot,
	.print		= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.func		= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);

	return ret;
}
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
			      tr, cpu, &tracing_pipe_fops);

	trace_create_cpu_file("trace", 0644, d_cpu,
			      tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
			      tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
			      tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
			      tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
			      tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
			      tr, cpu, &snapshot_raw_fops);
#endif
}
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
	}
}

static const struct file_operations trace_options_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);
}

static const struct file_operations trace_options_core_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_core_read,
	.write		= trace_options_core_write,
	.llseek		= generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry_tr(tr);

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	for (cnt = 0; topts[cnt].opt; cnt++)
		debugfs_remove(topts[cnt].entry);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);

	mutex_lock(&trace_types_lock);
	if (val) {
		tracer_tracing_on(tr);
		if (tr->current_trace->start)
			tr->current_trace->start(tr);
	} else {
		tracer_tracing_off(tr);
		if (tr->current_trace->stop)
			tr->current_trace->stop(tr);
	}
	mutex_unlock(&trace_types_lock);
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (ret) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	ring_buffer_free(buf->buffer);

	free_percpu(buf->data);
}

static void free_trace_buffers(struct trace_array *tr)
{
	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}
static int new_instance_create(const char *name)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_delete(const char *name)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	mutex_unlock(&trace_types_lock);

	return ret;
}
static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call	= trace_die_handler,
};

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {
		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
__init static int tracer_alloc_buffers(void)
{
	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);