/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;
/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;
/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;
/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}
/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */
enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);
static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);
static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);
static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}
/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}
/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}
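
/*
 * Illustrative sketch (not in the original file): how a tracing hook
 * typically consumes the helper above. example_pid_list and
 * example_probe() are hypothetical; real callers live in the event and
 * function-tracer pid-filter code.
 */
static struct trace_pid_list *example_pid_list;	/* hypothetical */

static void __maybe_unused example_probe(struct task_struct *task)
{
	if (trace_ignore_this_task(example_pid_list, task))
		return;		/* task is filtered out: do not trace it */

	/* ... record the event for @task ... */
}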
/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}
/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}
/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}
/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
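
/*
 * Illustrative sketch (not in the original file): wiring the three pid-list
 * helpers above into a seq_file. The example_* names are hypothetical; the
 * real wiring (with locking and pid-list lookup) is done by the callers of
 * these helpers. It assumes the pid_list pointer is stashed in m->private.
 */
static void *example_pids_seq_start(struct seq_file *m, loff_t *pos)
{
	return trace_pid_start(m->private, pos);
}

static void *example_pids_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return trace_pid_next(m->private, v, pos);
}

static void example_pids_seq_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations example_pids_seq_ops __maybe_unused = {
	.start	= example_pids_seq_start,
	.next	= example_pids_seq_next,
	.stop	= example_pids_seq_stop,
	.show	= trace_pid_show,
};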
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		parser.buffer[parser.idx] = 0;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}
/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * up to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() ..etc)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multi read-only access is also serialized.
 */
#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else
static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
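
/*
 * Illustrative sketch (not in the original file): readers pair these
 * primitives around ring-buffer consumption; @cpu is either a specific CPU
 * or RING_BUFFER_ALL_CPUS. example_read_cpu() is hypothetical.
 */
static void __maybe_unused example_read_cpu(int cpu)
{
	trace_access_lock(cpu);
	/* ... consume events from @cpu's ring buffer here ... */
	trace_access_unlock(cpu);
}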
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif
static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, flags, pc);
	ent->type = type;
}
static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			    int type, unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}
void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
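
/*
 * Illustrative sketch (not in the original file): callers normally reach
 * __trace_puts()/__trace_bputs() through the trace_puts() macro, which picks
 * the cheaper bputs variant for compile-time constants. A direct call looks
 * like this; example_mark_point() is hypothetical.
 */
static inline void __maybe_unused example_mark_point(void)
{
	static const char msg[] = "example: reached checkpoint\n";

	/* sizeof() - 1 excludes the terminating NUL */
	__trace_puts(_THIS_IP_, msg, sizeof(msg) - 1);
}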
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->str			= str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance(struct trace_array *tr)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, resize it, because the
	 * max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
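
/*
 * Illustrative sketch (not in the original file): a typical flight-recorder
 * use of the snapshot API. example_watchdog_init()/example_watchdog_hit()
 * are hypothetical; the calls they make are the exported functions above.
 */
static int __maybe_unused example_watchdog_init(void)
{
	/* may sleep: allocate the spare buffer up front */
	return tracing_alloc_snapshot();
}

static void __maybe_unused example_watchdog_hit(void)
{
	/* safe from a hot path: just swaps the live buffer and the spare */
	tracing_snapshot();
}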
void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
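
/*
 * Illustrative sketch (not in the original file): freezing the buffers the
 * moment a bug is detected, so the trace leading up to it survives.
 * example_check() and its condition are hypothetical.
 */
static void __maybe_unused example_check(int bad)
{
	if (bad) {
		trace_printk("example: bad state, freezing trace\n");
		tracing_off();
	}
}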
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}
/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}
/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
};
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}
/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
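
/*
 * Illustrative sketch (not in the original file): the usual life cycle of a
 * trace_parser when handling a write() of space-separated tokens.
 * example_parse_write() is hypothetical.
 */
static ssize_t __maybe_unused
example_parse_write(const char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t ret;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	ret = trace_get_user(&parser, ubuf, cnt, ppos);
	if (ret >= 0 && trace_parser_loaded(&parser)) {
		parser.buffer[parser.idx] = 0;	/* terminate the token */
		/* ... act on parser.buffer here ... */
	}

	trace_parser_put(&parser);
	return ret;
}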
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}
unsigned long __read_mostly	tracing_thresh;
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}
#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head		list;
	struct tracer			*type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
	 * initialized and such), then do not run its selftests yet.
	 * Instead, run it a little later in the boot process.
	 */
	if (!selftests_can_run)
		return save_selftest(type);

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
static __init int init_trace_selftests(void)
{
	struct trace_selftests *p, *n;
	struct tracer *t, **last;
	int ret;

	selftests_can_run = true;

	mutex_lock(&trace_types_lock);

	if (list_empty(&postponed_selftests))
		goto out;

	pr_info("Running postponed tracer tests:\n");

	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		ret = run_tracer_selftest(p->type);
		/* If the test fails, then warn and remove from available_tracers */
		if (ret < 0) {
			WARN(1, "tracer: %s failed selftest, disabling\n",
			     p->type->name);
			last = &trace_types;
			for (t = trace_types; t; t = t->next) {
				if (t == p->type) {
					*last = t->next;
					break;
				}
				last = &t->next;
			}
		}
		list_del(&p->list);
		kfree(p);
	}

 out:
	mutex_unlock(&trace_types_lock);

	return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/*allocate a dummy tracer_flags*/
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
static int *tgid_map;

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_taskinfo_disabled __read_mostly;
static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
					GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}
static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}
int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	/* treat recording of idle task as a success */
	if (!tsk->pid)
		return 1;

	if (unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}
static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
	else
		strcpy(comm, "<...>");
}
void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
int trace_find_tgid(int pid)
{
	if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
		return 0;

	return tgid_map[pid];
}
static int trace_save_tgid(struct task_struct *tsk)
{
	/* treat recording of idle task as a success */
	if (!tsk->pid)
		return 1;

	if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
		return 0;

	tgid_map[tsk->pid] = tsk->tgid;
	return 1;
}
static bool tracing_record_taskinfo_skip(int flags)
{
	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
		return true;
	if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
		return true;
	if (!__this_cpu_read(trace_taskinfo_save))
		return true;
	return false;
}
/**
 * tracing_record_taskinfo - record the task info of a task
 *
 * @task  - task to record
 * @flags - TRACE_RECORD_CMDLINE for recording comm
 *        - TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo(struct task_struct *task, int flags)
{
	bool done;

	if (tracing_record_taskinfo_skip(flags))
		return;

	/*
	 * Record as much task information as possible. If some fail, continue
	 * to try to record the others.
	 */
	done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);

	/* If recording any information failed, retry again soon. */
	if (!done)
		return;

	__this_cpu_write(trace_taskinfo_save, false);
}
/**
 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
 *
 * @prev - previous task during sched_switch
 * @next - next task during sched_switch
 * @flags - TRACE_RECORD_CMDLINE for recording comm
 *          TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags)
{
	bool done;

	if (tracing_record_taskinfo_skip(flags))
		return;

	/*
	 * Record as much task information as possible. If some fail, continue
	 * to try to record the others.
	 */
	done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
	done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);

	/* If recording any information failed, retry again soon. */
	if (!done)
		return;

	__this_cpu_write(trace_taskinfo_save, false);
}
/* Helpers to record a specific task information */
void tracing_record_cmdline(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
}

void tracing_record_tgid(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_TGID);
}
/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 * simplifies those functions and keeps them in sync.
 */
enum print_line_t trace_handle_return(struct trace_seq *s)
{
	return trace_seq_has_overflowed(s) ?
		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
EXPORT_SYMBOL_GPL(trace_handle_return);
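
/*
 * Illustrative sketch (not in the original file): the pattern the helper
 * above was made for. example_event_print() is hypothetical; real handlers
 * live in trace_output.c and the per-event code.
 */
static enum print_line_t __maybe_unused
example_event_print(struct trace_iterator *iter)
{
	trace_seq_printf(&iter->seq, "example: cpu=%d\n", iter->cpu);

	/* collapses the overflow check every handler would otherwise open-code */
	return trace_handle_return(&iter->seq);
}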
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count		= pc & 0xff;
	entry->pid			= (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
}
DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DEFINE_PER_CPU(int, trace_buffered_event_cnt);
static int trace_buffered_event_ref;
/**
 * trace_buffered_event_enable - enable buffering events
 *
 * When events are being filtered, it is quicker to use a temporary
 * buffer to write the event data into if there's a likely chance
 * that it will not be committed. The discard of the ring buffer
 * is not as fast as committing, and is much slower than copying
 * a commit.
 *
 * When an event is to be filtered, allocate per cpu buffers to
 * write the event data into, and if the event is filtered and discarded
 * it is simply dropped, otherwise, the entire data is to be committed
 * in one shot.
 */
void trace_buffered_event_enable(void)
{
	struct ring_buffer_event *event;
	struct page *page;
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	if (trace_buffered_event_ref++)
		return;

	for_each_tracing_cpu(cpu) {
		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto failed;

		event = page_address(page);
		memset(event, 0, sizeof(*event));

		per_cpu(trace_buffered_event, cpu) = event;

		preempt_disable();
		if (cpu == smp_processor_id() &&
		    this_cpu_read(trace_buffered_event) !=
		    per_cpu(trace_buffered_event, cpu))
			WARN_ON_ONCE(1);
		preempt_enable();
	}

	return;
 failed:
	trace_buffered_event_disable();
}
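
/*
 * Illustrative sketch (not in the original file): filter attach paths pair
 * the enable/disable calls under event_mutex, which the WARN_ON_ONCE()
 * above enforces. example_filter_attach() is hypothetical.
 */
static void __maybe_unused example_filter_attach(void)
{
	mutex_lock(&event_mutex);
	trace_buffered_event_enable();
	/* ... install the event filter ... */
	mutex_unlock(&event_mutex);
}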
static void enable_trace_buffered_event(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
	this_cpu_dec(trace_buffered_event_cnt);
}

static void disable_trace_buffered_event(void *data)
{
	this_cpu_inc(trace_buffered_event_cnt);
}
/**
 * trace_buffered_event_disable - disable buffering events
 *
 * When a filter is removed, it is faster to not use the buffered
 * events, and to commit directly into the ring buffer. Free up
 * the temp buffers when there are no more users. This requires
 * special synchronization with current events.
 */
void trace_buffered_event_disable(void)
{
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	if (WARN_ON_ONCE(!trace_buffered_event_ref))
		return;

	if (--trace_buffered_event_ref)
		return;

	preempt_disable();
	/* For each CPU, set the buffer as used. */
	smp_call_function_many(tracing_buffer_mask,
			       disable_trace_buffered_event, NULL, 1);
	preempt_enable();

	/* Wait for all current users to finish */
	synchronize_sched();

	for_each_tracing_cpu(cpu) {
		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
		per_cpu(trace_buffered_event, cpu) = NULL;
	}
	/*
	 * Make sure trace_buffered_event is NULL before clearing
	 * trace_buffered_event_cnt.
	 */
	smp_wmb();

	preempt_disable();
	/* Do the work on each cpu */
	smp_call_function_many(tracing_buffer_mask,
			       enable_trace_buffered_event, NULL, 1);
	preempt_enable();
}
static struct ring_buffer *temp_buffer;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;
	int val;

	*current_rb = trace_file->tr->trace_buffer.buffer;

	if ((trace_file->flags &
	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
	    (entry = this_cpu_read(trace_buffered_event))) {
		/* Try to use the per cpu buffer first */
		val = this_cpu_inc_return(trace_buffered_event_cnt);
		if (val == 1) {
			trace_event_setup(entry, type, flags, pc);
			entry->array[0] = len;
			return entry;
		}
		this_cpu_dec(trace_buffered_event_cnt);
	}

	entry = __trace_buffer_lock_reserve(*current_rb,
					    type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursive
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = __trace_buffer_lock_reserve(*current_rb,
						    type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
static DEFINE_SPINLOCK(tracepoint_iter_lock);
static DEFINE_MUTEX(tracepoint_printk_mutex);
static void output_printk(struct trace_event_buffer *fbuffer)
{
	struct trace_event_call *event_call;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	/* We should never get here if iter is NULL */
	if (WARN_ON_ONCE(!iter))
		return;

	event_call = fbuffer->trace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	event = &fbuffer->trace_file->event_call->event;

	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}
int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int save_tracepoint_printk;
	int ret;

	mutex_lock(&tracepoint_printk_mutex);
	save_tracepoint_printk = tracepoint_printk;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/*
	 * This will force exiting early, as tracepoint_printk
	 * is always zero when tracepoint_printk_iter is not allocated
	 */
	if (!tracepoint_print_iter)
		tracepoint_printk = 0;

	if (save_tracepoint_printk == tracepoint_printk)
		goto out;

	if (tracepoint_printk)
		static_key_enable(&tracepoint_printk_key.key);
	else
		static_key_disable(&tracepoint_printk_key.key);

 out:
	mutex_unlock(&tracepoint_printk_mutex);

	return ret;
}
*fbuffer
)
2365 if (static_key_false(&tracepoint_printk_key
.key
))
2366 output_printk(fbuffer
);
2368 event_trigger_unlock_commit(fbuffer
->trace_file
, fbuffer
->buffer
,
2369 fbuffer
->event
, fbuffer
->entry
,
2370 fbuffer
->flags
, fbuffer
->pc
);
2372 EXPORT_SYMBOL_GPL(trace_event_buffer_commit
);
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the following callers:
	 *   trace_buffer_unlock_commit_regs
	 *   event_trigger_unlock_commit
	 *   trace_event_buffer_commit
	 *   trace_event_raw_event_sched_switch
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
/*
 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 */
void
trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
				   struct ring_buffer_event *event)
{
	__buffer_unlock_commit(buffer, event);
}
static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	entry = ring_buffer_event_data(event);
	size = ring_buffer_event_length(event);
	export->write(entry, size);
}
static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);

static inline void ftrace_exports_enable(void)
{
	static_branch_enable(&ftrace_exports_enabled);
}

static inline void ftrace_exports_disable(void)
{
	static_branch_disable(&ftrace_exports_enabled);
}
void ftrace_exports(struct ring_buffer_event *event)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_notrace(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event);

		export = rcu_dereference_raw_notrace(export->next);
	}

	preempt_enable_notrace();
}
static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);

	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}
static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}
static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	if (*list == NULL)
		ftrace_exports_enable();

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	if (*list == NULL)
		ftrace_exports_disable();

	return ret;
}
int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
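
/*
 * Illustrative sketch (not from this file): a minimal exporter module
 * that receives a copy of every exported event. The names my_write and
 * my_export are hypothetical; the write() signature matches the call
 * made by trace_process_export() above.
 *
 *	static void my_write(const void *buf, unsigned int len)
 *	{
 *		// ship the raw trace_entry bytes to some transport
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_write,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */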
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					    flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event)) {
		if (static_branch_unlikely(&ftrace_exports_enabled))
			ftrace_exports(event);
		__buffer_unlock_commit(buffer, event);
	}
}
#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Add two, for this function and the call to save_stack_trace()
	 * If regs is set, then these functions will not be in the way.
	 */
	if (!regs)
		trace.skip += 2;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
					    sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	if (rcu_is_watching()) {
		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
		return;
	}

	/*
	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
	 * but if the above rcu_is_watching() failed, then the NMI
	 * triggered someplace critical, and rcu_irq_enter() should
	 * not be called from NMI.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * It is possible that a function is being traced in a
	 * location that RCU is not watching. A call to
	 * rcu_irq_enter() will make sure that it is, but there's
	 * a few internal rcu functions that could be traced
	 * where that won't work either. In those cases, we just
	 * do nothing.
	 */
	if (unlikely(rcu_irq_enter_disabled()))
		return;

	rcu_irq_enter_irqson();
	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
	rcu_irq_exit_irqson();
}
/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
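
/*
 * Example (illustrative, not from this file): dropping a stack trace
 * into the ring buffer from a suspect code path. The condition is
 * hypothetical; skip=0 records from the caller of trace_dump_stack().
 *
 *	if (unexpected_state)
 *		trace_dump_stack(0);
 */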
static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					    sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	int nesting;
	char buffer[4][TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;

/*
 * This allows for lockless recording.  If we're nested too deeply, then
 * this returns NULL.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);

	if (!buffer || buffer->nesting >= 4)
		return NULL;

	buffer->nesting++;

	/* Interrupts must see nesting incremented before we use the buffer */
	barrier();
	/* nesting counts active users (1..4); index with nesting - 1 */
	return &buffer->buffer[buffer->nesting - 1][0];
}

static void put_trace_buf(void)
{
	/* Don't let the decrement of nesting leak before this */
	barrier();
	this_cpu_dec(trace_percpu_buffer->nesting);
}
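
/*
 * Usage sketch (illustrative): callers must pair get_trace_buf() with
 * put_trace_buf() and keep preemption disabled for the whole window,
 * as trace_vbprintk() below does:
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (tbuffer) {
 *		... format the message into tbuffer ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */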
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
		return -ENOMEM;

	trace_percpu_buffer = buffers;
	return 0;
}
static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("\n");
	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}
void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}
/**
 * trace_vbprintk - write binary msg to tracing buffer
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}

out:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
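
/*
 * Note (illustrative): trace_vbprintk() is the binary fast path behind
 * trace_printk(). A call such as
 *
 *	trace_printk("ip=%lx count=%d\n", ip, count);
 *
 * records only the format pointer and the binary argument words here;
 * the format string is expanded later, when the buffer is read.
 */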
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
	}

out:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}
static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpu and peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}
/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}
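
/*
 * Illustrative consumer loop (not from this file): this is essentially
 * how the seq_file code below walks all entries of an iterator:
 *
 *	while (trace_find_next_entry_inc(iter))
 *		print_trace_line(iter);
 */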
static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_taskinfo_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}
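
/*
 * Note (illustrative): s_start()/s_next()/s_stop()/s_show() implement
 * the standard seq_file protocol. For a read at position pos, the
 * seq_file core calls roughly:
 *
 *	p = s_start(m, &pos);
 *	while (p) {
 *		s_show(m, p);
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);
 */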
static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_taskinfo_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /         \n");
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}
static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
				   unsigned int flags)
{
	bool tgid = flags & TRACE_ITER_RECORD_TGID;

	print_event_info(buf, m);

	seq_printf(m, "#           TASK-PID   CPU#   %s  TIMESTAMP  FUNCTION\n", tgid ? "TGID     " : "");
	seq_printf(m, "#              | |       |    %s     |         |\n",      tgid ? "  |      " : "");
}
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
				       unsigned int flags)
{
	bool tgid = flags & TRACE_ITER_RECORD_TGID;
	const char tgid_space[] = "          ";
	const char space[] = "  ";

	seq_printf(m, "#                          %s  _-----=> irqs-off\n",
		   tgid ? tgid_space : space);
	seq_printf(m, "#                          %s / _----=> need-resched\n",
		   tgid ? tgid_space : space);
	seq_printf(m, "#                          %s| / _---=> hardirq/softirq\n",
		   tgid ? tgid_space : space);
	seq_printf(m, "#                          %s|| / _--=> preempt-depth\n",
		   tgid ? tgid_space : space);
	seq_printf(m, "#                          %s||| /     delay\n",
		   tgid ? tgid_space : space);
	seq_printf(m, "#           TASK-PID   CPU#%s||||    TIMESTAMP  FUNCTION\n",
		   tgid ? "   TGID   " : space);
	seq_printf(m, "#              | |       | %s||||       |         |\n",
		   tgid ? "     |    " : space);
}
static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_available(iter->started) &&
	    cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (cpumask_available(iter->started))
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}
int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
/*  Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}
void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer,
							   m, trace_flags);
			else
				print_func_help_header(iter->trace_buffer, m,
						       trace_flags);
		}
	}
}
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}
#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}
static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Return NULL here.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}
/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
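
/*
 * Example of the encoding above: the per-cpu file for CPU 2 stores
 * (void *)3 in i_cdev (cpu + 1, so that 0 can mean "not set"), and
 * tracing_get_cpu() returns 2. A NULL i_cdev means "all CPUs".
 */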
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}
/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}
static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static inline bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}
static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}
static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}
static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};
/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = snprintf(mask_str, count, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
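
/*
 * Example (illustrative): limit tracing to CPUs 0 and 1 from user
 * space by writing a hex mask; the debugfs path assumes the usual
 * mount point:
 *
 *	# echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 *
 * Bits newly set in the mask re-enable per-cpu recording above;
 * bits newly cleared disable it.
 */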
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}
static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tracer_flags->trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}
/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}
/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_RECORD_TGID) {
		if (!tgid_map)
			tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map),
					   GFP_KERNEL);
		if (!tgid_map) {
			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
			return -ENOMEM;
		}

		trace_event_enable_tgid_record(enabled);
	}

	if (mask == TRACE_ITER_EVENT_FORK)
		trace_event_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_FUNC_FORK)
		ftrace_pid_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}
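
/*
 * Example (illustrative): these flags are flipped from user space via
 * the trace_options file, which lands in trace_set_options() below and
 * then here, e.g.:
 *
 *	# echo record-tgid > /sys/kernel/debug/tracing/trace_options
 *	# echo norecord-tgid > /sys/kernel/debug/tracing/trace_options
 *
 * arrives with mask == TRACE_ITER_RECORD_TGID and enabled == 1 or 0
 * respectively.
 */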
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;
	size_t orig_len = strlen(option);

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}
static void __init apply_trace_boot_options(void)
{
	char *buf = trace_boot_options_buf;
	char *option;

	while (true) {
		option = strsep(&buf, ",");

		if (!option)
			break;

		if (*option)
			trace_set_options(&global_trace, option);

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
4504 static const char readme_msg
[] =
4505 "tracing mini-HOWTO:\n\n"
4506 "# echo 0 > tracing_on : quick way to disable tracing\n"
4507 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4508 " Important files:\n"
4509 " trace\t\t\t- The static contents of the buffer\n"
4510 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4511 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4512 " current_tracer\t- function and latency tracers\n"
4513 " available_tracers\t- list of configured tracers for current_tracer\n"
4514 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4515 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4516 " trace_clock\t\t-change the clock used to order events\n"
4517 " local: Per cpu clock but may not be synced across CPUs\n"
4518 " global: Synced across CPUs but slows tracing down.\n"
4519 " counter: Not a clock, but just an increment\n"
4520 " uptime: Jiffy counter from time of boot\n"
4521 " perf: Same clock that perf events use\n"
4522 #ifdef CONFIG_X86_64
4523 " x86-tsc: TSC cycle counter\n"
4525 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4526 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4527 " tracing_cpumask\t- Limit which CPUs to trace\n"
4528 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4529 "\t\t\t Remove sub-buffer with rmdir\n"
4530 " trace_options\t\t- Set format or modify how tracing happens\n"
4531 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4532 "\t\t\t option name\n"
4533 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4534 #ifdef CONFIG_DYNAMIC_FTRACE
4535 "\n available_filter_functions - list of functions that can be filtered on\n"
4536 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4537 "\t\t\t functions\n"
4538 "\t accepts: func_full_name or glob-matching-pattern\n"
4539 "\t modules: Can select a group via module\n"
4540 "\t Format: :mod:<module-name>\n"
4541 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4542 "\t triggers: a command to perform when function is hit\n"
4543 "\t Format: <function>:<trigger>[:count]\n"
4544 "\t trigger: traceon, traceoff\n"
4545 "\t\t enable_event:<system>:<event>\n"
4546 "\t\t disable_event:<system>:<event>\n"
4547 #ifdef CONFIG_STACKTRACE
4550 #ifdef CONFIG_TRACER_SNAPSHOT
4555 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4556 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4557 "\t The first one will disable tracing every time do_fault is hit\n"
4558 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4559 "\t The first time do trap is hit and it disables tracing, the\n"
4560 "\t counter will decrement to 2. If tracing is already disabled,\n"
4561 "\t the counter will not decrement. It only decrements when the\n"
4562 "\t trigger did work\n"
4563 "\t To remove trigger without count:\n"
4564 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4565 "\t To remove trigger with a count:\n"
4566 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4567 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4568 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4569 "\t modules: Can select a group via module command :mod:\n"
4570 "\t Does not accept triggers\n"
4571 #endif /* CONFIG_DYNAMIC_FTRACE */
4572 #ifdef CONFIG_FUNCTION_TRACER
4573 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4576 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4577 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4578 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4579 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4581 #ifdef CONFIG_TRACER_SNAPSHOT
4582 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4583 "\t\t\t snapshot buffer. Read the contents for more\n"
4584 "\t\t\t information\n"
4586 #ifdef CONFIG_STACK_TRACER
4587 " stack_trace\t\t- Shows the max stack trace when active\n"
4588 " stack_max_size\t- Shows current max stack size that was traced\n"
4589 "\t\t\t Write into this file to reset the max size (trigger a\n"
4590 "\t\t\t new trace)\n"
4591 #ifdef CONFIG_DYNAMIC_FTRACE
4592 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4595 #endif /* CONFIG_STACK_TRACER */
4596 #ifdef CONFIG_KPROBE_EVENTS
4597 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4598 "\t\t\t Write into this file to define/undefine new trace events.\n"
4600 #ifdef CONFIG_UPROBE_EVENTS
4601 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4602 "\t\t\t Write into this file to define/undefine new trace events.\n"
4604 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4605 "\t accepts: event-definitions (one definition per line)\n"
4606 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4607 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4608 "\t -:[<group>/]<event>\n"
4609 #ifdef CONFIG_KPROBE_EVENTS
4610 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4611 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4613 #ifdef CONFIG_UPROBE_EVENTS
4614 "\t place: <path>:<offset>\n"
4616 "\t args: <name>=fetcharg[:type]\n"
4617 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4618 "\t $stack<index>, $stack, $retval, $comm\n"
4619 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4620 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4622 " events/\t\t- Directory containing all trace event subsystems:\n"
4623 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4624 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4625 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4627 " filter\t\t- If set, only events passing filter are traced\n"
4628 " events/<system>/<event>/\t- Directory containing control files for\n"
4630 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4631 " filter\t\t- If set, only events passing filter are traced\n"
4632 " trigger\t\t- If set, a command to perform when event is hit\n"
4633 "\t Format: <trigger>[:count][if <filter>]\n"
4634 "\t trigger: traceon, traceoff\n"
4635 "\t enable_event:<system>:<event>\n"
4636 "\t disable_event:<system>:<event>\n"
4637 #ifdef CONFIG_HIST_TRIGGERS
4638 "\t enable_hist:<system>:<event>\n"
4639 "\t disable_hist:<system>:<event>\n"
4641 #ifdef CONFIG_STACKTRACE
4644 #ifdef CONFIG_TRACER_SNAPSHOT
4647 #ifdef CONFIG_HIST_TRIGGERS
4648 "\t\t hist (see below)\n"
4650 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4651 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4652 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4653 "\t events/block/block_unplug/trigger\n"
4654 "\t The first disables tracing every time block_unplug is hit.\n"
4655 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4656 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4657 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4658 "\t Like function triggers, the counter is only decremented if it\n"
4659 "\t enabled or disabled tracing.\n"
4660 "\t To remove a trigger without a count:\n"
4661 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4662 "\t To remove a trigger with a count:\n"
4663 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4664 "\t Filters can be ignored when removing a trigger.\n"
4665 #ifdef CONFIG_HIST_TRIGGERS
4666 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4667 "\t Format: hist:keys=<field1[,field2,...]>\n"
4668 "\t [:values=<field1[,field2,...]>]\n"
4669 "\t [:sort=<field1[,field2,...]>]\n"
4670 "\t [:size=#entries]\n"
4671 "\t [:pause][:continue][:clear]\n"
4672 "\t [:name=histname1]\n"
4673 "\t [if <filter>]\n\n"
4674 "\t When a matching event is hit, an entry is added to a hash\n"
4675 "\t table using the key(s) and value(s) named, and the value of a\n"
4676 "\t sum called 'hitcount' is incremented. Keys and values\n"
4677 "\t correspond to fields in the event's format description. Keys\n"
4678 "\t can be any field, or the special string 'stacktrace'.\n"
4679 "\t Compound keys consisting of up to two fields can be specified\n"
4680 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4681 "\t fields. Sort keys consisting of up to two fields can be\n"
4682 "\t specified using the 'sort' keyword. The sort direction can\n"
4683 "\t be modified by appending '.descending' or '.ascending' to a\n"
4684 "\t sort field. The 'size' parameter can be used to specify more\n"
4685 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4686 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4687 "\t its histogram data will be shared with other triggers of the\n"
4688 "\t same name, and trigger hits will update this common data.\n\n"
4689 "\t Reading the 'hist' file for the event will dump the hash\n"
4690 "\t table in its entirety to stdout. If there are multiple hist\n"
4691 "\t triggers attached to an event, there will be a table for each\n"
4692 "\t trigger in the output. The table displayed for a named\n"
4693 "\t trigger will be the same as any other instance having the\n"
4694 "\t same name. The default format used to display a given field\n"
4695 "\t can be modified by appending any of the following modifiers\n"
4696 "\t to the field name, as applicable:\n\n"
4697 "\t .hex display a number as a hex value\n"
4698 "\t .sym display an address as a symbol\n"
4699 "\t .sym-offset display an address as a symbol and offset\n"
4700 "\t .execname display a common_pid as a program name\n"
4701 "\t .syscall display a syscall id as a syscall name\n\n"
4702 "\t .log2 display log2 value rather than raw number\n\n"
4703 "\t The 'pause' parameter can be used to pause an existing hist\n"
4704 "\t trigger or to start a hist trigger but not log any events\n"
4705 "\t until told to do so. 'continue' can be used to start or\n"
4706 "\t restart a paused hist trigger.\n\n"
4707 "\t The 'clear' parameter will clear the contents of a running\n"
4708 "\t hist trigger and leave its current paused/active state\n"
4710 "\t The enable_hist and disable_hist triggers can be used to\n"
4711 "\t have one event conditionally start and stop another event's\n"
4712 "\t already-attached hist trigger. The syntax is analagous to\n"
4713 "\t the enable_event and disable_event triggers.\n"
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
{
	int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
		if (trace_find_tgid(*ptr))
			return ptr;
	}

	return NULL;
}

static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	v = &tgid_map[0];
	while (l <= *pos) {
		v = saved_tgids_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_tgids_stop(struct seq_file *m, void *v)
{
}

static int saved_tgids_show(struct seq_file *m, void *v)
{
	int pid = (int *)v - tgid_map;

	seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
	return 0;
}

static const struct seq_operations tracing_saved_tgids_seq_ops = {
	.start		= saved_tgids_start,
	.stop		= saved_tgids_stop,
	.next		= saved_tgids_next,
	.show		= saved_tgids_show,
};

static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_tgids_seq_ops);
}

static const struct file_operations tracing_saved_tgids_fops = {
	.open		= tracing_saved_tgids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
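
/*
 * Editorial note: the callbacks above follow the standard seq_file
 * contract: ->start() returns the element at *pos (here, the first pid
 * with a saved tgid), ->next() advances the cursor and bumps *pos,
 * ->show() emits one "<pid> <tgid>" line, and ->stop() is the cleanup
 * hook. The seq_file core effectively runs:
 *
 *	v = start(m, &pos);
 *	while (v) {
 *		show(m, v);
 *		v = next(m, v, &pos);
 *	}
 *	stop(m, v);
 */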
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry and no more than PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};
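
/*
 * Usage sketch (illustrative; the exact default depends on
 * SAVED_CMDLINES_DEFAULT): reading the file reports the current capacity
 * of the pid<->comm cache, and writing a new value reallocates it:
 *
 *	# cat saved_cmdlines_size
 *	128
 *	# echo 1024 > saved_cmdlines_size
 */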
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static union trace_eval_map_item *
update_eval_map(union trace_eval_map_item *ptr)
{
	if (!ptr->map.eval_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_eval_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	ptr = update_eval_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;

	(*pos)++;

	ptr = update_eval_map(ptr);

	return ptr;
}

static void *eval_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_eval_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_eval_mutex);

	v = trace_eval_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = eval_map_next(m, v, &l);
	}

	return v;
}

static void eval_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_eval_mutex);
}

static int eval_map_show(struct seq_file *m, void *v)
{
	union trace_eval_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.eval_string, ptr->map.eval_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_eval_map_seq_ops = {
	.start		= eval_map_start,
	.next		= eval_map_next,
	.stop		= eval_map_stop,
	.show		= eval_map_show,
};

static int tracing_eval_map_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_eval_map_seq_ops);
}

static const struct file_operations tracing_eval_map_fops = {
	.open		= tracing_eval_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static inline union trace_eval_map_item *
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}

static void
trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
			   int len)
{
	struct trace_eval_map **stop;
	struct trace_eval_map **map;
	union trace_eval_map_item *map_array;
	union trace_eval_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_eval_maps contains the map plus a head and tail item,
	 * where the head holds the module and length of array, and the
	 * tail holds a pointer to the next list.
	 */
	map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
	if (!map_array) {
		pr_warn("Unable to allocate trace eval mapping\n");
		return;
	}

	mutex_lock(&trace_eval_mutex);

	if (!trace_eval_maps)
		trace_eval_maps = map_array;
	else {
		ptr = trace_eval_maps;
		for (;;) {
			ptr = trace_eval_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_eval_mutex);
}

static void trace_create_eval_file(struct dentry *d_tracer)
{
	trace_create_file("eval_map", 0444, d_tracer,
			  NULL, &tracing_eval_map_fops);
}
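
/*
 * Editorial layout sketch: for a module contributing N maps, the
 * map_array built above is:
 *
 *	[ head: mod, length = N ][ map 0 ] ... [ map N-1 ][ tail: next ]
 *
 * trace_eval_jmp_to_tail() jumps from a head to its tail (head + N + 1),
 * and tail.next chains to the next module's array. This is how
 * eval_map_start()/eval_map_next() visit every map while skipping the
 * head and tail bookkeeping items.
 */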
#else /* CONFIG_TRACE_EVAL_MAP_FILE */
static inline void trace_create_eval_file(struct dentry *d_tracer) { }
static inline void trace_insert_eval_map_file(struct module *mod,
			      struct trace_eval_map **start, int len) { }
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */

static void trace_insert_eval_map(struct module *mod,
				  struct trace_eval_map **start, int len)
{
	struct trace_eval_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_eval_update(map, len);

	trace_insert_eval_map_file(mod, start, len);
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different size max buffers!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure, this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system that has it
 * configured in, the ring buffers start at a minimum size. Once a user
 * starts to use the tracing facility, the buffers need to grow to their
 * default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}
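
/*
 * Usage sketch (illustrative, not from the original source): any path
 * that is about to turn tracing on is expected to expand the buffers
 * first, roughly:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *	// ... then enable events or install the tracer ...
 */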
struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}

static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}
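
/*
 * Usage note (editorial): this pair of handlers backs the
 * "current_tracer" file, so switching tracers from user space is:
 *
 *	# echo function > current_tracer
 *	# echo nop > current_tracer
 *
 * Trailing whitespace added by echo is stripped above before the lookup.
 */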
static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

#endif

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->current_trace->ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, false);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
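
/*
 * Usage note (editorial): trace_pipe is a blocking, consuming reader:
 *
 *	# cat trace_pipe
 *
 * blocks until entries arrive, and entries read here are removed from
 * the ring buffer (via trace_consume() above), unlike the "trace" file,
 * which is a non-consuming snapshot view.
 */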
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	const char faulted[] = "<faulted>";
	ssize_t written;
	int size;
	int len;

/* Used in tracing_mark_raw_write() as well */
#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */

	/* If less than "<faulted>", then make sure we can still add that */
	if (cnt < FAULTED_SIZE)
		size += FAULTED_SIZE - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    irq_flags, preempt_count());
	if (unlikely(!event))
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
	if (len) {
		memcpy(&entry->buf, faulted, FAULTED_SIZE);
		cnt = FAULTED_SIZE;
		written = -EFAULT;
	} else
		written = cnt;

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	if (written > 0)
		*fpos += written;

	return written;
}
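
/*
 * Usage sketch (editorial): this implements the "trace_marker" file,
 * which lets user space annotate the trace stream:
 *
 *	# echo "hit the slow path" > trace_marker
 *
 * The write becomes a TRACE_PRINT entry in the ring buffer, with
 * "<faulted>" substituted when the user page cannot be copied.
 */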
/* Limit it for now to 3K (including tag) */
#define RAW_DATA_MAX_SIZE (1024*3)

static ssize_t
tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct raw_data_entry *entry;
	const char faulted[] = "<faulted>";
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	/* The marker must at least have a tag id */
	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt;
	if (cnt < FAULT_SIZE_ID)
		size += FAULT_SIZE_ID - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
					    irq_flags, preempt_count());
	if (!event)
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);

	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
	if (len) {
		entry->id = -1;
		memcpy(&entry->buf, faulted, FAULTED_SIZE);
		written = -EFAULT;
	} else
		written = cnt;

	__buffer_unlock_commit(buffer, event);

	if (written > 0)
		*fpos += written;

	return written;
}
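
/*
 * Format note (editorial): trace_marker_raw takes binary records. The
 * first sizeof(int) bytes of the write are the tag id (entry->id above)
 * and the remainder is opaque payload, capped at RAW_DATA_MAX_SIZE.
 * A minimal user-space writer (illustrative) might look like:
 *
 *	struct { int id; char data[16]; } rec = { .id = 42, .data = "payload" };
 *	write(fd, &rec, sizeof(rec));
 */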
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
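
/*
 * Usage sketch (illustrative): these handlers back the "trace_clock"
 * file. Reading lists the available clocks with the active one in
 * brackets; writing a name switches clocks and, as noted above, resets
 * the buffers because timestamps from different clocks are not
 * comparable:
 *
 *	# cat trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	# echo mono > trace_clock
 *
 * (The exact list depends on the architecture and kernel version.)
 */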
struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		spare_cpu;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */

static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_mark_raw_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_raw_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret = 0;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare) {
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
		if (IS_ERR(info->spare)) {
			ret = PTR_ERR(info->spare);
			info->spare = NULL;
		} else {
			info->spare_cpu = iter->cpu_file;
		}
	}
	if (!info->spare)
		return ret;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, false);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer,
					   info->spare_cpu, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			cpu;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (IS_ERR(ref->page)) {
			ret = PTR_ERR(ref->page);
			ref->page = NULL;
			kfree(ref);
			break;
		}
		ref->cpu = iter->cpu_file;

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->cpu,
						   ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, true);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
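
/*
 * Editorial note: these fops back the per-cpu "trace_pipe_raw" files.
 * Unlike trace_pipe, reads and splices here move whole ring buffer
 * pages (see ring_buffer_read_page() above), so lengths and offsets
 * must be PAGE_SIZE granular; splice hands pages to the pipe without
 * copying, with lifetime managed by the buffer_ref refcounting above.
 */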
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64]; /* Not too big for a shallow stack */
	int r;

	r = scnprintf(buf, 63, "%ld", *p);
	buf[r++] = '\n';

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	tracing_snapshot_instance(tr);
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return;

		(*count)--;
	}

	tracing_snapshot_instance(tr);
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		if (!mapper)
			return;
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
	.init			= ftrace_snapshot_init,
	.free			= ftrace_snapshot_free,
};

static int
ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = alloc_snapshot(tr);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, tr, ops, count);

 out:
	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
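
/*
 * Usage sketch (illustrative): the "snapshot" function command
 * registered above is written into set_ftrace_filter, optionally with
 * a trigger count:
 *
 *	# echo 'do_exit:snapshot' > set_ftrace_filter
 *	# echo 'do_exit:snapshot:5' > set_ftrace_filter
 *	# echo '!do_exit:snapshot' > set_ftrace_filter
 *
 * The first snapshots on every do_exit() hit, the second only for the
 * first five hits, and the '!' form removes the probe again.
 */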
7128 static struct dentry
*tracing_get_dentry(struct trace_array
*tr
)
7130 if (WARN_ON(!tr
->dir
))
7131 return ERR_PTR(-ENODEV
);
7133 /* Top directory uses NULL as the parent */
7134 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
7137 /* All sub buffers have a descriptor */
7141 static struct dentry
*tracing_dentry_percpu(struct trace_array
*tr
, int cpu
)
7143 struct dentry
*d_tracer
;
7146 return tr
->percpu_dir
;
7148 d_tracer
= tracing_get_dentry(tr
);
7149 if (IS_ERR(d_tracer
))
7152 tr
->percpu_dir
= tracefs_create_dir("per_cpu", d_tracer
);
7154 WARN_ONCE(!tr
->percpu_dir
,
7155 "Could not create tracefs directory 'per_cpu/%d'\n", cpu
);
7157 return tr
->percpu_dir
;
7160 static struct dentry
*
7161 trace_create_cpu_file(const char *name
, umode_t mode
, struct dentry
*parent
,
7162 void *data
, long cpu
, const struct file_operations
*fops
)
7164 struct dentry
*ret
= trace_create_file(name
, mode
, parent
, data
, fops
);
7166 if (ret
) /* See tracing_get_cpu() */
7167 d_inode(ret
)->i_cdev
= (void *)(cpu
+ 1);
7172 tracing_init_tracefs_percpu(struct trace_array
*tr
, long cpu
)
7174 struct dentry
*d_percpu
= tracing_dentry_percpu(tr
, cpu
);
7175 struct dentry
*d_cpu
;
7176 char cpu_dir
[30]; /* 30 characters should be more than enough */
7181 snprintf(cpu_dir
, 30, "cpu%ld", cpu
);
7182 d_cpu
= tracefs_create_dir(cpu_dir
, d_percpu
);
7184 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir
);
7188 /* per cpu trace_pipe */
7189 trace_create_cpu_file("trace_pipe", 0444, d_cpu
,
7190 tr
, cpu
, &tracing_pipe_fops
);
7193 trace_create_cpu_file("trace", 0644, d_cpu
,
7194 tr
, cpu
, &tracing_fops
);
7196 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu
,
7197 tr
, cpu
, &tracing_buffers_fops
);
7199 trace_create_cpu_file("stats", 0444, d_cpu
,
7200 tr
, cpu
, &tracing_stats_fops
);
7202 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu
,
7203 tr
, cpu
, &tracing_entries_fops
);
7205 #ifdef CONFIG_TRACER_SNAPSHOT
7206 trace_create_cpu_file("snapshot", 0644, d_cpu
,
7207 tr
, cpu
, &snapshot_fops
);
7209 trace_create_cpu_file("snapshot_raw", 0444, d_cpu
,
7210 tr
, cpu
, &snapshot_raw_fops
);
7214 #ifdef CONFIG_FTRACE_SELFTEST
7215 /* Let selftest have access to static functions in this file */
7216 #include "trace_selftest.c"
7220 trace_options_read(struct file
*filp
, char __user
*ubuf
, size_t cnt
,
7223 struct trace_option_dentry
*topt
= filp
->private_data
;
7226 if (topt
->flags
->val
& topt
->opt
->bit
)
7231 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, 2);
7235 trace_options_write(struct file
*filp
, const char __user
*ubuf
, size_t cnt
,
7238 struct trace_option_dentry
*topt
= filp
->private_data
;
7242 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
7246 if (val
!= 0 && val
!= 1)
7249 if (!!(topt
->flags
->val
& topt
->opt
->bit
) != val
) {
7250 mutex_lock(&trace_types_lock
);
7251 ret
= __set_tracer_option(topt
->tr
, topt
->flags
,
7253 mutex_unlock(&trace_types_lock
);
7264 static const struct file_operations trace_options_fops
= {
7265 .open
= tracing_open_generic
,
7266 .read
= trace_options_read
,
7267 .write
= trace_options_write
,
7268 .llseek
= generic_file_llseek
,
7272 * In order to pass in both the trace_array descriptor as well as the index
7273 * to the flag that the trace option file represents, the trace_array
7274 * has a character array of trace_flags_index[], which holds the index
7275 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7276 * The address of this character array is passed to the flag option file
7277 * read/write callbacks.
7279 * In order to extract both the index and the trace_array descriptor,
7280 * get_tr_index() uses the following algorithm.
7284 * As the pointer itself contains the address of the index (remember
7287 * Then to get the trace_array descriptor, by subtracting that index
7288 * from the ptr, we get to the start of the index itself.
7290 * ptr - idx == &index[0]
7292 * Then a simple container_of() from that pointer gets us to the
7293 * trace_array descriptor.
7295 static void get_tr_index(void *data
, struct trace_array
**ptr
,
7296 unsigned int *pindex
)
7298 *pindex
= *(unsigned char *)data
;
7300 *ptr
= container_of(data
- *pindex
, struct trace_array
,
7305 trace_options_core_read(struct file
*filp
, char __user
*ubuf
, size_t cnt
,
7308 void *tr_index
= filp
->private_data
;
7309 struct trace_array
*tr
;
7313 get_tr_index(tr_index
, &tr
, &index
);
7315 if (tr
->trace_flags
& (1 << index
))
7320 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, 2);
static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}
static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}
static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there's no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}
static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}
static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}
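/*
 * The net effect of the code above: each trace flag appears as a file
 * under the tracefs "options" directory (per instance), reading back
 * "0\n" or "1\n" and toggled by writing 0 or 1. For example (path and
 * option name are illustrative; the set of options depends on the
 * kernel configuration and the current tracer):
 *
 *	echo 1 > /sys/kernel/tracing/options/sym-offset
 */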
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}
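/*
 * rb_simple_read()/rb_simple_write() above back the "tracing_on" file:
 * writing 0 stops recording (and calls the current tracer's ->stop()
 * callback) without tearing anything down, and writing 1 resumes it.
 * A typical use (mountpoint shown for illustration):
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on
 */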
static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic_tr,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};
struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}
static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}
static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}
static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	ftrace_init_trace_array(tr);

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
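/*
 * instance_mkdir()/instance_rmdir() are invoked by tracefs when a
 * directory is created or removed under the "instances" directory,
 * giving each instance its own buffers, options and events. For
 * example ("foo" is an arbitrary name):
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *	rmdir /sys/kernel/tracing/instances/foo
 */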
static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	clear_ftrace_function_probes(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}
static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}
/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}
extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static void __init trace_eval_init(void)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}
#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */
static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			  NULL, &tracing_saved_tgids_fops);

	trace_eval_init();

	trace_create_eval_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
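/*
 * ftrace_dump() is exported so that kernel code and modules can dump
 * the trace buffers directly when debugging. A sketch of a last-resort
 * debug hook (hypothetical call site; DUMP_ALL and DUMP_ORIG are the
 * modes handled in the switch above):
 *
 *	if (WARN_ON(something_went_wrong))
 *		ftrace_dump(DUMP_ALL);
 */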
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}
void __init trace_init(void)
{
	trace_event_init();
}
__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);