/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/trace.h>
#include <linux/sched/rt.h>

#include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;
/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}
/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */
enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);
unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}
/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}
/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}
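
/*
 * Illustrative, compiled-out sketch (not part of the original file): how a
 * caller would typically combine trace_ignore_this_task() with an
 * RCU-protected pid list. The "example_pid_list" pointer and the hook
 * itself are hypothetical; only trace_ignore_this_task() is real.
 */
#if 0
static struct trace_pid_list __rcu *example_pid_list;

static bool example_should_skip(struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	bool ignore;

	rcu_read_lock_sched();
	pid_list = rcu_dereference_sched(example_pid_list);
	/* A NULL list means "trace everything"; the helper handles that */
	ignore = trace_ignore_this_task(pid_list, task);
	rcu_read_unlock_sched();

	return ignore;
}
#endif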
/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}
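
/*
 * Illustrative, compiled-out sketch (not part of the original file): the
 * add/remove helper above is meant to be wired into fork and exit hooks.
 * The handler names are hypothetical; the calling convention (self on
 * fork, NULL on exit) follows the comment above.
 */
#if 0
static void example_on_fork(struct trace_pid_list *pid_list,
			    struct task_struct *self,
			    struct task_struct *child)
{
	/* The child is added only if the forking task is already filtered */
	trace_filter_add_remove_task(pid_list, self, child);
}

static void example_on_exit(struct trace_pid_list *pid_list,
			    struct task_struct *task)
{
	/* A NULL "self" means remove @task from the list */
	trace_filter_add_remove_task(pid_list, NULL, task);
}
#endif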
/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}
/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by the seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}
/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
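
/*
 * Illustrative, compiled-out sketch (not part of the original file): the
 * three helpers above are designed to plug straight into a seq_file. The
 * wrapper start/next callbacks and the "example_pid_list" they pass in
 * are hypothetical.
 */
#if 0
static struct trace_pid_list *example_pid_list;

static void *example_pids_start(struct seq_file *m, loff_t *pos)
{
	return trace_pid_start(example_pid_list, pos);
}

static void *example_pids_next(struct seq_file *m, void *v, loff_t *pos)
{
	return trace_pid_next(example_pid_list, v, pos);
}

static void example_pids_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations example_pids_seq_ops = {
	.start	= example_pids_start,
	.next	= example_pids_next,
	.stop	= example_pids_stop,
	.show	= trace_pid_show,
};
#endif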
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * used.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list)
		return -ENOMEM;

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		parser.buffer[parser.idx] = 0;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}
/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
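
/*
 * Illustrative, compiled-out sketch (not part of the original file): how a
 * reader path would bracket consumption of one CPU's ring buffer with the
 * helpers above. The "example_consume_cpu" function is hypothetical glue;
 * the lock/unlock pairing around ring_buffer_consume() is the point.
 */
#if 0
static void example_consume_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	trace_access_lock(cpu);
	while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL)))
		; /* hand the event to whatever output path is in use */
	trace_access_unlock(cpu);
}
#endif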
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}
#endif
static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, flags, pc);
	ent->type = type;
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}
void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance(struct trace_array *tr)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
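
/*
 * Illustrative, compiled-out sketch (not part of the original file): the
 * intended calling pattern for the exported snapshot API above. The
 * "example_init" and "example_condition_hit" hooks are hypothetical; the
 * allocate-then-snapshot ordering is what the comments above prescribe.
 */
#if 0
static int example_init(void)
{
	/* Safe to sleep here, so the spare buffer can be allocated up front */
	return tracing_alloc_snapshot();
}

static void example_condition_hit(void)
{
	/* Swap the live buffer with the (already allocated) snapshot buffer */
	tracing_snapshot();
}
#endif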
void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}
/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
};
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
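
/*
 * Illustrative, compiled-out sketch (not part of the original file): a
 * write() handler that uses trace_get_user() to split user input into
 * whitespace-separated tokens. The handler and its "example_handle_token"
 * callback are hypothetical; the parser loop mirrors trace_pid_write()
 * above.
 */
#if 0
static ssize_t example_write(const char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read = 0;
	ssize_t ret = 0;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	while (cnt > 0) {
		loff_t pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		parser.buffer[parser.idx] = 0;
		example_handle_token(parser.buffer);
		trace_parser_clear(&parser);
	}
	trace_parser_put(&parser);

	return ret < 0 ? ret : read;
}
#endif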
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}
#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head		list;
	struct tracer			*type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
	 * initialized and such), then do not run its selftests yet.
	 * Instead, run it a little later in the boot process.
	 */
	if (!selftests_can_run)
		return save_selftest(type);

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
static __init int init_trace_selftests(void)
{
	struct trace_selftests *p, *n;
	struct tracer *t, **last;
	int ret;

	selftests_can_run = true;

	mutex_lock(&trace_types_lock);

	if (list_empty(&postponed_selftests))
		goto out;

	pr_info("Running postponed tracer tests:\n");

	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		ret = run_tracer_selftest(p->type);
		/* If the test fails, then warn and remove from available_tracers */
		if (ret < 0) {
			WARN(1, "tracer: %s failed selftest, disabling\n",
			     p->type->name);
			last = &trace_types;
			for (t = trace_types; t; t = t->next) {
				if (t == p->type) {
					*last = t->next;
					break;
				}
				last = &t->next;
			}
		}
		list_del(&p->list);
		kfree(p);
	}

 out:
	mutex_unlock(&trace_types_lock);

	return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
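
/*
 * Illustrative, compiled-out sketch (not part of the original file): the
 * smallest useful struct tracer a plugin would pass to the
 * register_tracer() interface documented above. The "example" tracer and
 * its callbacks are hypothetical; only .name is mandatory, as the checks
 * in register_tracer() enforce.
 */
#if 0
static int example_tracer_init(struct trace_array *tr)
{
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name		= "example",
	.init		= example_tracer_init,
	.reset		= example_tracer_reset,
};

static __init int example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(example_tracer_register);
#endif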
static int *tgid_map;

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_taskinfo_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
					GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}
int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	/* treat recording of idle task as a success */
	if (!tsk->pid)
		return 1;

	if (unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}
static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
	else
		strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
int trace_find_tgid(int pid)
{
	if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
		return 0;

	return tgid_map[pid];
}

static int trace_save_tgid(struct task_struct *tsk)
{
	/* treat recording of idle task as a success */
	if (!tsk->pid)
		return 1;

	if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
		return 0;

	tgid_map[tsk->pid] = tsk->tgid;
	return 1;
}
static bool tracing_record_taskinfo_skip(int flags)
{
	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
		return true;
	if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
		return true;
	if (!__this_cpu_read(trace_taskinfo_save))
		return true;
	return false;
}

/**
 * tracing_record_taskinfo - record the task info of a task
 *
 * @task  - task to record
 * @flags - TRACE_RECORD_CMDLINE for recording comm
 *        - TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo(struct task_struct *task, int flags)
{
	bool done;

	if (tracing_record_taskinfo_skip(flags))
		return;

	/*
	 * Record as much task information as possible. If some fail, continue
	 * to try to record the others.
	 */
	done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);

	/* If recording any information failed, retry again soon. */
	if (!done)
		return;

	__this_cpu_write(trace_taskinfo_save, false);
}
/**
 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
 *
 * @prev - previous task during sched_switch
 * @next - next task during sched_switch
 * @flags - TRACE_RECORD_CMDLINE for recording comm
 *          TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags)
{
	bool done;

	if (tracing_record_taskinfo_skip(flags))
		return;

	/*
	 * Record as much task information as possible. If some fail, continue
	 * to try to record the others.
	 */
	done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
	done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);

	/* If recording any information failed, retry again soon. */
	if (!done)
		return;

	__this_cpu_write(trace_taskinfo_save, false);
}

/* Helpers to record a specific task information */
void tracing_record_cmdline(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
}

void tracing_record_tgid(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_TGID);
}
/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 * simplifies those functions and keeps them in sync.
 */
enum print_line_t trace_handle_return(struct trace_seq *s)
{
	return trace_seq_has_overflowed(s) ?
		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
EXPORT_SYMBOL_GPL(trace_handle_return);
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count		= pc & 0xff;
	entry->pid			= (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
}

DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DEFINE_PER_CPU(int, trace_buffered_event_cnt);
static int trace_buffered_event_ref;
/**
 * trace_buffered_event_enable - enable buffering events
 *
 * When events are being filtered, it is quicker to use a temporary
 * buffer to write the event data into if there's a likely chance
 * that it will not be committed. The discard of the ring buffer
 * is not as fast as committing, and is much slower than copying
 * a commit.
 *
 * When an event is to be filtered, allocate per cpu buffers to
 * write the event data into, and if the event is filtered and discarded
 * it is simply dropped, otherwise, the entire data is to be committed
 * in one shot.
 */
void trace_buffered_event_enable(void)
{
	struct ring_buffer_event *event;
	struct page *page;
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	if (trace_buffered_event_ref++)
		return;

	for_each_tracing_cpu(cpu) {
		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto failed;

		event = page_address(page);
		memset(event, 0, sizeof(*event));

		per_cpu(trace_buffered_event, cpu) = event;

		preempt_disable();
		if (cpu == smp_processor_id() &&
		    this_cpu_read(trace_buffered_event) !=
		    per_cpu(trace_buffered_event, cpu))
			WARN_ON_ONCE(1);
		preempt_enable();
	}

	return;
 failed:
	trace_buffered_event_disable();
}

static void enable_trace_buffered_event(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
	this_cpu_dec(trace_buffered_event_cnt);
}

static void disable_trace_buffered_event(void *data)
{
	this_cpu_inc(trace_buffered_event_cnt);
}
/**
 * trace_buffered_event_disable - disable buffering events
 *
 * When a filter is removed, it is faster to not use the buffered
 * events, and to commit directly into the ring buffer. Free up
 * the temp buffers when there are no more users. This requires
 * special synchronization with current events.
 */
void trace_buffered_event_disable(void)
{
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	if (WARN_ON_ONCE(!trace_buffered_event_ref))
		return;

	if (--trace_buffered_event_ref)
		return;

	preempt_disable();
	/* For each CPU, set the buffer as used. */
	smp_call_function_many(tracing_buffer_mask,
			       disable_trace_buffered_event, NULL, 1);
	preempt_enable();

	/* Wait for all current users to finish */
	synchronize_sched();

	for_each_tracing_cpu(cpu) {
		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
		per_cpu(trace_buffered_event, cpu) = NULL;
	}
	/*
	 * Make sure trace_buffered_event is NULL before clearing
	 * trace_buffered_event_cnt.
	 */
	smp_wmb();

	preempt_disable();
	/* Do the work on each cpu */
	smp_call_function_many(tracing_buffer_mask,
			       enable_trace_buffered_event, NULL, 1);
	preempt_enable();
}

static struct ring_buffer *temp_buffer;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;
	int val;

	*current_rb = trace_file->tr->trace_buffer.buffer;

	if ((trace_file->flags &
	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
	    (entry = this_cpu_read(trace_buffered_event))) {
		/* Try to use the per cpu buffer first */
		val = this_cpu_inc_return(trace_buffered_event_cnt);
		if (val == 1) {
			trace_event_setup(entry, type, flags, pc);
			entry->array[0] = len;
			return entry;
		}
		this_cpu_dec(trace_buffered_event_cnt);
	}

	entry = __trace_buffer_lock_reserve(*current_rb,
					    type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = __trace_buffer_lock_reserve(*current_rb,
						    type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
static DEFINE_SPINLOCK(tracepoint_iter_lock);
static DEFINE_MUTEX(tracepoint_printk_mutex);

static void output_printk(struct trace_event_buffer *fbuffer)
{
	struct trace_event_call *event_call;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	/* We should never get here if iter is NULL */
	if (WARN_ON_ONCE(!iter))
		return;

	event_call = fbuffer->trace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	event = &fbuffer->trace_file->event_call->event;

	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}
int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int save_tracepoint_printk;
	int ret;

	mutex_lock(&tracepoint_printk_mutex);
	save_tracepoint_printk = tracepoint_printk;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/*
	 * This will force exiting early, as tracepoint_printk
	 * is always zero when tracepoint_printk_iter is not allocated
	 */
	if (!tracepoint_print_iter)
		tracepoint_printk = 0;

	if (save_tracepoint_printk == tracepoint_printk)
		goto out;

	if (tracepoint_printk)
		static_key_enable(&tracepoint_printk_key.key);
	else
		static_key_disable(&tracepoint_printk_key.key);

 out:
	mutex_unlock(&tracepoint_printk_mutex);

	return ret;
}
void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
	if (static_key_false(&tracepoint_printk_key.key))
		output_printk(fbuffer);

	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
				    fbuffer->event, fbuffer->entry,
				    fbuffer->flags, fbuffer->pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the following callers:
	 *   trace_buffer_unlock_commit_regs
	 *   event_trigger_unlock_commit
	 *   trace_event_buffer_commit
	 *   trace_event_raw_event_sched_switch
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
/*
 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 */
void
trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
				   struct ring_buffer_event *event)
{
	__buffer_unlock_commit(buffer, event);
}
static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	entry = ring_buffer_event_data(event);
	size = ring_buffer_event_length(event);
	export->write(entry, size);
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
static inline void ftrace_exports_enable(void)
{
	static_branch_enable(&ftrace_exports_enabled);
}

static inline void ftrace_exports_disable(void)
{
	static_branch_disable(&ftrace_exports_enabled);
}

void ftrace_exports(struct ring_buffer_event *event)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_notrace(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event);
		export = rcu_dereference_raw_notrace(export->next);
	}

	preempt_enable_notrace();
}
static void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}

static int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	if (*list == NULL)
		ftrace_exports_enable();

	add_trace_export(list, export);
}

static int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	if (*list == NULL)
		ftrace_exports_disable();

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
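/*
 * Example (illustrative only, not built): a minimal sketch of how an
 * external module might hook into the export list managed above.  The
 * names here are hypothetical; only register_ftrace_export(),
 * unregister_ftrace_export() and the ->write() signature used by
 * trace_process_export() above are taken from this file.
 */
#if 0
static void example_export_write(const void *buf, unsigned int len)
{
	/* Forward or copy the raw trace entry; called with preemption off. */
}

static struct trace_export example_export = {
	.write	= example_export_write,
};

/*
 * register_ftrace_export(&example_export) from module init,
 * unregister_ftrace_export(&example_export) from module exit.
 */
#endif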
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					    flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event)) {
		if (static_branch_unlikely(&ftrace_exports_enabled))
			ftrace_exports(event);
		__buffer_unlock_commit(buffer, event);
	}
}
#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Add two, for this function and the call to save_stack_trace()
	 * If regs is set, then these functions will not be in the way.
	 */
	if (!regs)
		trace.skip += 2;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
					    sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	if (rcu_is_watching()) {
		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
		return;
	}

	/*
	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
	 * but if the above rcu_is_watching() failed, then the NMI
	 * triggered someplace critical, and rcu_irq_enter() should
	 * not be called from NMI.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * It is possible that a function is being traced in a
	 * location that RCU is not watching. A call to
	 * rcu_irq_enter() will make sure that it is, but there's
	 * a few internal rcu functions that could be traced
	 * where that won't work either. In those cases, we just
	 * do nothing.
	 */
	if (unlikely(rcu_irq_enter_disabled()))
		return;

	rcu_irq_enter_irqson();
	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
	rcu_irq_exit_irqson();
}
/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
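/*
 * Example (sketch): trace_dump_stack() can be called from most kernel
 * contexts to record the current backtrace in the ring buffer, e.g.:
 *
 *	if (unexpected_condition)
 *		trace_dump_stack(0);
 *
 * A non-zero @skip drops that many additional callers from the top of
 * the recorded trace.
 */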
static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					    sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr->trace_buffer.buffer, flags, preempt_count());
}

#endif /* CONFIG_STACKTRACE */
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	int nesting;
	char buffer[4][TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;

/*
 * This allows for lockless recording.  If we're nested too deeply, then
 * this returns NULL.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);

	if (!buffer || buffer->nesting >= 4)
		return NULL;

	buffer->nesting++;

	/* Interrupts must see nesting incremented before we use the buffer */
	barrier();
	return &buffer->buffer[buffer->nesting - 1][0];
}

static void put_trace_buf(void)
{
	/* Don't let the decrement of nesting leak before this */
	barrier();
	this_cpu_dec(trace_percpu_buffer->nesting);
}

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
		return -ENOMEM;

	trace_percpu_buffer = buffers;
	return 0;
}
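/*
 * Usage pattern for the per-cpu trace_printk buffers above (this is the
 * discipline the trace_vbprintk()/__trace_array_vprintk() callers below
 * follow): the buffer is only valid between get and put, and preemption
 * must stay disabled for the whole window.
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (tbuffer) {
 *		... format at most TRACE_BUF_SIZE bytes into tbuffer ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */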
static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("\n");
	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}
/**
 * trace_vbprintk - write binary msg to tracing buffer
 *
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip	= ip;
	entry->fmt	= fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}

out:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
	}

out:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
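/*
 * Example (sketch): the usual way into the two paths above is the
 * trace_printk() wrapper, callable from most kernel code:
 *
 *	trace_printk("processing cpu=%d count=%lu\n", cpu, count);
 *
 * With a compile-time constant format string this typically takes the
 * binary trace_vbprintk() path; otherwise it falls back to
 * trace_vprintk().
 */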
3048 static void trace_iterator_increment(struct trace_iterator
*iter
)
3050 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, iter
->cpu
);
3054 ring_buffer_read(buf_iter
, NULL
);
3057 static struct trace_entry
*
3058 peek_next_entry(struct trace_iterator
*iter
, int cpu
, u64
*ts
,
3059 unsigned long *lost_events
)
3061 struct ring_buffer_event
*event
;
3062 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, cpu
);
3065 event
= ring_buffer_iter_peek(buf_iter
, ts
);
3067 event
= ring_buffer_peek(iter
->trace_buffer
->buffer
, cpu
, ts
,
3071 iter
->ent_size
= ring_buffer_event_length(event
);
3072 return ring_buffer_event_data(event
);
3078 static struct trace_entry
*
3079 __find_next_entry(struct trace_iterator
*iter
, int *ent_cpu
,
3080 unsigned long *missing_events
, u64
*ent_ts
)
3082 struct ring_buffer
*buffer
= iter
->trace_buffer
->buffer
;
3083 struct trace_entry
*ent
, *next
= NULL
;
3084 unsigned long lost_events
= 0, next_lost
= 0;
3085 int cpu_file
= iter
->cpu_file
;
3086 u64 next_ts
= 0, ts
;
3092 * If we are in a per_cpu trace file, don't bother by iterating over
3093 * all cpu and peek directly.
3095 if (cpu_file
> RING_BUFFER_ALL_CPUS
) {
3096 if (ring_buffer_empty_cpu(buffer
, cpu_file
))
3098 ent
= peek_next_entry(iter
, cpu_file
, ent_ts
, missing_events
);
3100 *ent_cpu
= cpu_file
;
3105 for_each_tracing_cpu(cpu
) {
3107 if (ring_buffer_empty_cpu(buffer
, cpu
))
3110 ent
= peek_next_entry(iter
, cpu
, &ts
, &lost_events
);
3113 * Pick the entry with the smallest timestamp:
3115 if (ent
&& (!next
|| ts
< next_ts
)) {
3119 next_lost
= lost_events
;
3120 next_size
= iter
->ent_size
;
3124 iter
->ent_size
= next_size
;
3127 *ent_cpu
= next_cpu
;
3133 *missing_events
= next_lost
;
3138 /* Find the next real entry, without updating the iterator itself */
3139 struct trace_entry
*trace_find_next_entry(struct trace_iterator
*iter
,
3140 int *ent_cpu
, u64
*ent_ts
)
3142 return __find_next_entry(iter
, ent_cpu
, NULL
, ent_ts
);
3145 /* Find the next real entry, and increment the iterator to the next entry */
3146 void *trace_find_next_entry_inc(struct trace_iterator
*iter
)
3148 iter
->ent
= __find_next_entry(iter
, &iter
->cpu
,
3149 &iter
->lost_events
, &iter
->ts
);
3152 trace_iterator_increment(iter
);
3154 return iter
->ent
? iter
: NULL
;
3157 static void trace_consume(struct trace_iterator
*iter
)
3159 ring_buffer_consume(iter
->trace_buffer
->buffer
, iter
->cpu
, &iter
->ts
,
3160 &iter
->lost_events
);
3163 static void *s_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
3165 struct trace_iterator
*iter
= m
->private;
3169 WARN_ON_ONCE(iter
->leftover
);
3173 /* can't go backwards */
3178 ent
= trace_find_next_entry_inc(iter
);
3182 while (ent
&& iter
->idx
< i
)
3183 ent
= trace_find_next_entry_inc(iter
);
3190 void tracing_iter_reset(struct trace_iterator
*iter
, int cpu
)
3192 struct ring_buffer_event
*event
;
3193 struct ring_buffer_iter
*buf_iter
;
3194 unsigned long entries
= 0;
3197 per_cpu_ptr(iter
->trace_buffer
->data
, cpu
)->skipped_entries
= 0;
3199 buf_iter
= trace_buffer_iter(iter
, cpu
);
3203 ring_buffer_iter_reset(buf_iter
);
3206 * We could have the case with the max latency tracers
3207 * that a reset never took place on a cpu. This is evident
3208 * by the timestamp being before the start of the buffer.
3210 while ((event
= ring_buffer_iter_peek(buf_iter
, &ts
))) {
3211 if (ts
>= iter
->trace_buffer
->time_start
)
3214 ring_buffer_read(buf_iter
, NULL
);
3217 per_cpu_ptr(iter
->trace_buffer
->data
, cpu
)->skipped_entries
= entries
;
3221 * The current tracer is copied to avoid a global locking
3224 static void *s_start(struct seq_file
*m
, loff_t
*pos
)
3226 struct trace_iterator
*iter
= m
->private;
3227 struct trace_array
*tr
= iter
->tr
;
3228 int cpu_file
= iter
->cpu_file
;
3234 * copy the tracer to avoid using a global lock all around.
3235 * iter->trace is a copy of current_trace, the pointer to the
3236 * name may be used instead of a strcmp(), as iter->trace->name
3237 * will point to the same string as current_trace->name.
3239 mutex_lock(&trace_types_lock
);
3240 if (unlikely(tr
->current_trace
&& iter
->trace
->name
!= tr
->current_trace
->name
))
3241 *iter
->trace
= *tr
->current_trace
;
3242 mutex_unlock(&trace_types_lock
);
3244 #ifdef CONFIG_TRACER_MAX_TRACE
3245 if (iter
->snapshot
&& iter
->trace
->use_max_tr
)
3246 return ERR_PTR(-EBUSY
);
3249 if (!iter
->snapshot
)
3250 atomic_inc(&trace_record_taskinfo_disabled
);
3252 if (*pos
!= iter
->pos
) {
3257 if (cpu_file
== RING_BUFFER_ALL_CPUS
) {
3258 for_each_tracing_cpu(cpu
)
3259 tracing_iter_reset(iter
, cpu
);
3261 tracing_iter_reset(iter
, cpu_file
);
3264 for (p
= iter
; p
&& l
< *pos
; p
= s_next(m
, p
, &l
))
3269 * If we overflowed the seq_file before, then we want
3270 * to just reuse the trace_seq buffer again.
3276 p
= s_next(m
, p
, &l
);
3280 trace_event_read_lock();
3281 trace_access_lock(cpu_file
);
3285 static void s_stop(struct seq_file
*m
, void *p
)
3287 struct trace_iterator
*iter
= m
->private;
3289 #ifdef CONFIG_TRACER_MAX_TRACE
3290 if (iter
->snapshot
&& iter
->trace
->use_max_tr
)
3294 if (!iter
->snapshot
)
3295 atomic_dec(&trace_record_taskinfo_disabled
);
3297 trace_access_unlock(iter
->cpu_file
);
3298 trace_event_read_unlock();
3302 get_total_entries(struct trace_buffer
*buf
,
3303 unsigned long *total
, unsigned long *entries
)
3305 unsigned long count
;
3311 for_each_tracing_cpu(cpu
) {
3312 count
= ring_buffer_entries_cpu(buf
->buffer
, cpu
);
3314 * If this buffer has skipped entries, then we hold all
3315 * entries for the trace and we need to ignore the
3316 * ones before the time stamp.
3318 if (per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
) {
3319 count
-= per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
;
3320 /* total is the same as the entries */
3324 ring_buffer_overrun_cpu(buf
->buffer
, cpu
);
3329 static void print_lat_help_header(struct seq_file
*m
)
3331 seq_puts(m
, "# _------=> CPU# \n"
3332 "# / _-----=> irqs-off \n"
3333 "# | / _----=> need-resched \n"
3334 "# || / _---=> hardirq/softirq \n"
3335 "# ||| / _--=> preempt-depth \n"
3337 "# cmd pid ||||| time | caller \n"
3338 "# \\ / ||||| \\ | / \n");
3341 static void print_event_info(struct trace_buffer
*buf
, struct seq_file
*m
)
3343 unsigned long total
;
3344 unsigned long entries
;
3346 get_total_entries(buf
, &total
, &entries
);
3347 seq_printf(m
, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3348 entries
, total
, num_online_cpus());
3352 static void print_func_help_header(struct trace_buffer
*buf
, struct seq_file
*m
,
3355 bool tgid
= flags
& TRACE_ITER_RECORD_TGID
;
3357 print_event_info(buf
, m
);
3359 seq_printf(m
, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid
? "TGID " : "");
3360 seq_printf(m
, "# | | | %s | |\n", tgid
? " | " : "");
3363 static void print_func_help_header_irq(struct trace_buffer
*buf
, struct seq_file
*m
,
3366 bool tgid
= flags
& TRACE_ITER_RECORD_TGID
;
3367 const char tgid_space
[] = " ";
3368 const char space
[] = " ";
3370 seq_printf(m
, "# %s _-----=> irqs-off\n",
3371 tgid
? tgid_space
: space
);
3372 seq_printf(m
, "# %s / _----=> need-resched\n",
3373 tgid
? tgid_space
: space
);
3374 seq_printf(m
, "# %s| / _---=> hardirq/softirq\n",
3375 tgid
? tgid_space
: space
);
3376 seq_printf(m
, "# %s|| / _--=> preempt-depth\n",
3377 tgid
? tgid_space
: space
);
3378 seq_printf(m
, "# %s||| / delay\n",
3379 tgid
? tgid_space
: space
);
3380 seq_printf(m
, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n",
3381 tgid
? " TGID " : space
);
3382 seq_printf(m
, "# | | | %s|||| | |\n",
3383 tgid
? " | " : space
);
3387 print_trace_header(struct seq_file
*m
, struct trace_iterator
*iter
)
3389 unsigned long sym_flags
= (global_trace
.trace_flags
& TRACE_ITER_SYM_MASK
);
3390 struct trace_buffer
*buf
= iter
->trace_buffer
;
3391 struct trace_array_cpu
*data
= per_cpu_ptr(buf
->data
, buf
->cpu
);
3392 struct tracer
*type
= iter
->trace
;
3393 unsigned long entries
;
3394 unsigned long total
;
3395 const char *name
= "preemption";
3399 get_total_entries(buf
, &total
, &entries
);
3401 seq_printf(m
, "# %s latency trace v1.1.5 on %s\n",
3403 seq_puts(m
, "# -----------------------------------"
3404 "---------------------------------\n");
3405 seq_printf(m
, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3406 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3407 nsecs_to_usecs(data
->saved_latency
),
3411 #if defined(CONFIG_PREEMPT_NONE)
3413 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3415 #elif defined(CONFIG_PREEMPT)
3420 /* These are reserved for later use */
3423 seq_printf(m
, " #P:%d)\n", num_online_cpus());
3427 seq_puts(m
, "# -----------------\n");
3428 seq_printf(m
, "# | task: %.16s-%d "
3429 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3430 data
->comm
, data
->pid
,
3431 from_kuid_munged(seq_user_ns(m
), data
->uid
), data
->nice
,
3432 data
->policy
, data
->rt_priority
);
3433 seq_puts(m
, "# -----------------\n");
3435 if (data
->critical_start
) {
3436 seq_puts(m
, "# => started at: ");
3437 seq_print_ip_sym(&iter
->seq
, data
->critical_start
, sym_flags
);
3438 trace_print_seq(m
, &iter
->seq
);
3439 seq_puts(m
, "\n# => ended at: ");
3440 seq_print_ip_sym(&iter
->seq
, data
->critical_end
, sym_flags
);
3441 trace_print_seq(m
, &iter
->seq
);
3442 seq_puts(m
, "\n#\n");
3448 static void test_cpu_buff_start(struct trace_iterator
*iter
)
3450 struct trace_seq
*s
= &iter
->seq
;
3451 struct trace_array
*tr
= iter
->tr
;
3453 if (!(tr
->trace_flags
& TRACE_ITER_ANNOTATE
))
3456 if (!(iter
->iter_flags
& TRACE_FILE_ANNOTATE
))
3459 if (cpumask_available(iter
->started
) &&
3460 cpumask_test_cpu(iter
->cpu
, iter
->started
))
3463 if (per_cpu_ptr(iter
->trace_buffer
->data
, iter
->cpu
)->skipped_entries
)
3466 if (cpumask_available(iter
->started
))
3467 cpumask_set_cpu(iter
->cpu
, iter
->started
);
3469 /* Don't print started cpu buffer for the first entry of the trace */
3471 trace_seq_printf(s
, "##### CPU %u buffer started ####\n",
3475 static enum print_line_t
print_trace_fmt(struct trace_iterator
*iter
)
3477 struct trace_array
*tr
= iter
->tr
;
3478 struct trace_seq
*s
= &iter
->seq
;
3479 unsigned long sym_flags
= (tr
->trace_flags
& TRACE_ITER_SYM_MASK
);
3480 struct trace_entry
*entry
;
3481 struct trace_event
*event
;
3485 test_cpu_buff_start(iter
);
3487 event
= ftrace_find_event(entry
->type
);
3489 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
3490 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
)
3491 trace_print_lat_context(iter
);
3493 trace_print_context(iter
);
3496 if (trace_seq_has_overflowed(s
))
3497 return TRACE_TYPE_PARTIAL_LINE
;
3500 return event
->funcs
->trace(iter
, sym_flags
, event
);
3502 trace_seq_printf(s
, "Unknown type %d\n", entry
->type
);
3504 return trace_handle_return(s
);
3507 static enum print_line_t
print_raw_fmt(struct trace_iterator
*iter
)
3509 struct trace_array
*tr
= iter
->tr
;
3510 struct trace_seq
*s
= &iter
->seq
;
3511 struct trace_entry
*entry
;
3512 struct trace_event
*event
;
3516 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
)
3517 trace_seq_printf(s
, "%d %d %llu ",
3518 entry
->pid
, iter
->cpu
, iter
->ts
);
3520 if (trace_seq_has_overflowed(s
))
3521 return TRACE_TYPE_PARTIAL_LINE
;
3523 event
= ftrace_find_event(entry
->type
);
3525 return event
->funcs
->raw(iter
, 0, event
);
3527 trace_seq_printf(s
, "%d ?\n", entry
->type
);
3529 return trace_handle_return(s
);
3532 static enum print_line_t
print_hex_fmt(struct trace_iterator
*iter
)
3534 struct trace_array
*tr
= iter
->tr
;
3535 struct trace_seq
*s
= &iter
->seq
;
3536 unsigned char newline
= '\n';
3537 struct trace_entry
*entry
;
3538 struct trace_event
*event
;
3542 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
3543 SEQ_PUT_HEX_FIELD(s
, entry
->pid
);
3544 SEQ_PUT_HEX_FIELD(s
, iter
->cpu
);
3545 SEQ_PUT_HEX_FIELD(s
, iter
->ts
);
3546 if (trace_seq_has_overflowed(s
))
3547 return TRACE_TYPE_PARTIAL_LINE
;
3550 event
= ftrace_find_event(entry
->type
);
3552 enum print_line_t ret
= event
->funcs
->hex(iter
, 0, event
);
3553 if (ret
!= TRACE_TYPE_HANDLED
)
3557 SEQ_PUT_FIELD(s
, newline
);
3559 return trace_handle_return(s
);
3562 static enum print_line_t
print_bin_fmt(struct trace_iterator
*iter
)
3564 struct trace_array
*tr
= iter
->tr
;
3565 struct trace_seq
*s
= &iter
->seq
;
3566 struct trace_entry
*entry
;
3567 struct trace_event
*event
;
3571 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
3572 SEQ_PUT_FIELD(s
, entry
->pid
);
3573 SEQ_PUT_FIELD(s
, iter
->cpu
);
3574 SEQ_PUT_FIELD(s
, iter
->ts
);
3575 if (trace_seq_has_overflowed(s
))
3576 return TRACE_TYPE_PARTIAL_LINE
;
3579 event
= ftrace_find_event(entry
->type
);
3580 return event
? event
->funcs
->binary(iter
, 0, event
) :
3584 int trace_empty(struct trace_iterator
*iter
)
3586 struct ring_buffer_iter
*buf_iter
;
3589 /* If we are looking at one CPU buffer, only check that one */
3590 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
3591 cpu
= iter
->cpu_file
;
3592 buf_iter
= trace_buffer_iter(iter
, cpu
);
3594 if (!ring_buffer_iter_empty(buf_iter
))
3597 if (!ring_buffer_empty_cpu(iter
->trace_buffer
->buffer
, cpu
))
3603 for_each_tracing_cpu(cpu
) {
3604 buf_iter
= trace_buffer_iter(iter
, cpu
);
3606 if (!ring_buffer_iter_empty(buf_iter
))
3609 if (!ring_buffer_empty_cpu(iter
->trace_buffer
->buffer
, cpu
))
3617 /* Called with trace_event_read_lock() held. */
3618 enum print_line_t
print_trace_line(struct trace_iterator
*iter
)
3620 struct trace_array
*tr
= iter
->tr
;
3621 unsigned long trace_flags
= tr
->trace_flags
;
3622 enum print_line_t ret
;
3624 if (iter
->lost_events
) {
3625 trace_seq_printf(&iter
->seq
, "CPU:%d [LOST %lu EVENTS]\n",
3626 iter
->cpu
, iter
->lost_events
);
3627 if (trace_seq_has_overflowed(&iter
->seq
))
3628 return TRACE_TYPE_PARTIAL_LINE
;
3631 if (iter
->trace
&& iter
->trace
->print_line
) {
3632 ret
= iter
->trace
->print_line(iter
);
3633 if (ret
!= TRACE_TYPE_UNHANDLED
)
3637 if (iter
->ent
->type
== TRACE_BPUTS
&&
3638 trace_flags
& TRACE_ITER_PRINTK
&&
3639 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
3640 return trace_print_bputs_msg_only(iter
);
3642 if (iter
->ent
->type
== TRACE_BPRINT
&&
3643 trace_flags
& TRACE_ITER_PRINTK
&&
3644 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
3645 return trace_print_bprintk_msg_only(iter
);
3647 if (iter
->ent
->type
== TRACE_PRINT
&&
3648 trace_flags
& TRACE_ITER_PRINTK
&&
3649 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
3650 return trace_print_printk_msg_only(iter
);
3652 if (trace_flags
& TRACE_ITER_BIN
)
3653 return print_bin_fmt(iter
);
3655 if (trace_flags
& TRACE_ITER_HEX
)
3656 return print_hex_fmt(iter
);
3658 if (trace_flags
& TRACE_ITER_RAW
)
3659 return print_raw_fmt(iter
);
3661 return print_trace_fmt(iter
);
3664 void trace_latency_header(struct seq_file
*m
)
3666 struct trace_iterator
*iter
= m
->private;
3667 struct trace_array
*tr
= iter
->tr
;
3669 /* print nothing if the buffers are empty */
3670 if (trace_empty(iter
))
3673 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
)
3674 print_trace_header(m
, iter
);
3676 if (!(tr
->trace_flags
& TRACE_ITER_VERBOSE
))
3677 print_lat_help_header(m
);
3680 void trace_default_header(struct seq_file
*m
)
3682 struct trace_iterator
*iter
= m
->private;
3683 struct trace_array
*tr
= iter
->tr
;
3684 unsigned long trace_flags
= tr
->trace_flags
;
3686 if (!(trace_flags
& TRACE_ITER_CONTEXT_INFO
))
3689 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
) {
3690 /* print nothing if the buffers are empty */
3691 if (trace_empty(iter
))
3693 print_trace_header(m
, iter
);
3694 if (!(trace_flags
& TRACE_ITER_VERBOSE
))
3695 print_lat_help_header(m
);
3697 if (!(trace_flags
& TRACE_ITER_VERBOSE
)) {
3698 if (trace_flags
& TRACE_ITER_IRQ_INFO
)
3699 print_func_help_header_irq(iter
->trace_buffer
,
3702 print_func_help_header(iter
->trace_buffer
, m
,
3708 static void test_ftrace_alive(struct seq_file
*m
)
3710 if (!ftrace_is_dead())
3712 seq_puts(m
, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3713 "# MAY BE MISSING FUNCTION EVENTS\n");
3716 #ifdef CONFIG_TRACER_MAX_TRACE
3717 static void show_snapshot_main_help(struct seq_file
*m
)
3719 seq_puts(m
, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3720 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3721 "# Takes a snapshot of the main buffer.\n"
3722 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3723 "# (Doesn't have to be '2' works with any number that\n"
3724 "# is not a '0' or '1')\n");
3727 static void show_snapshot_percpu_help(struct seq_file
*m
)
3729 seq_puts(m
, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3730 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3731 seq_puts(m
, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3732 "# Takes a snapshot of the main buffer for this cpu.\n");
3734 seq_puts(m
, "# echo 1 > snapshot : Not supported with this kernel.\n"
3735 "# Must use main snapshot file to allocate.\n");
3737 seq_puts(m
, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3738 "# (Doesn't have to be '2' works with any number that\n"
3739 "# is not a '0' or '1')\n");
3742 static void print_snapshot_help(struct seq_file
*m
, struct trace_iterator
*iter
)
3744 if (iter
->tr
->allocated_snapshot
)
3745 seq_puts(m
, "#\n# * Snapshot is allocated *\n#\n");
3747 seq_puts(m
, "#\n# * Snapshot is freed *\n#\n");
3749 seq_puts(m
, "# Snapshot commands:\n");
3750 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
3751 show_snapshot_main_help(m
);
3753 show_snapshot_percpu_help(m
);
3756 /* Should never be called */
3757 static inline void print_snapshot_help(struct seq_file
*m
, struct trace_iterator
*iter
) { }
3760 static int s_show(struct seq_file
*m
, void *v
)
3762 struct trace_iterator
*iter
= v
;
3765 if (iter
->ent
== NULL
) {
3767 seq_printf(m
, "# tracer: %s\n", iter
->trace
->name
);
3769 test_ftrace_alive(m
);
3771 if (iter
->snapshot
&& trace_empty(iter
))
3772 print_snapshot_help(m
, iter
);
3773 else if (iter
->trace
&& iter
->trace
->print_header
)
3774 iter
->trace
->print_header(m
);
3776 trace_default_header(m
);
3778 } else if (iter
->leftover
) {
3780 * If we filled the seq_file buffer earlier, we
3781 * want to just show it now.
3783 ret
= trace_print_seq(m
, &iter
->seq
);
3785 /* ret should this time be zero, but you never know */
3786 iter
->leftover
= ret
;
3789 print_trace_line(iter
);
3790 ret
= trace_print_seq(m
, &iter
->seq
);
3792 * If we overflow the seq_file buffer, then it will
3793 * ask us for this data again at start up.
3795 * ret is 0 if seq_file write succeeded.
3798 iter
->leftover
= ret
;
3805 * Should be used after trace_array_get(), trace_types_lock
3806 * ensures that i_cdev was already initialized.
3808 static inline int tracing_get_cpu(struct inode
*inode
)
3810 if (inode
->i_cdev
) /* See trace_create_cpu_file() */
3811 return (long)inode
->i_cdev
- 1;
3812 return RING_BUFFER_ALL_CPUS
;
3815 static const struct seq_operations tracer_seq_ops
= {
3822 static struct trace_iterator
*
3823 __tracing_open(struct inode
*inode
, struct file
*file
, bool snapshot
)
3825 struct trace_array
*tr
= inode
->i_private
;
3826 struct trace_iterator
*iter
;
3829 if (tracing_disabled
)
3830 return ERR_PTR(-ENODEV
);
3832 iter
= __seq_open_private(file
, &tracer_seq_ops
, sizeof(*iter
));
3834 return ERR_PTR(-ENOMEM
);
3836 iter
->buffer_iter
= kcalloc(nr_cpu_ids
, sizeof(*iter
->buffer_iter
),
3838 if (!iter
->buffer_iter
)
3842 * We make a copy of the current tracer to avoid concurrent
3843 * changes on it while we are reading.
3845 mutex_lock(&trace_types_lock
);
3846 iter
->trace
= kzalloc(sizeof(*iter
->trace
), GFP_KERNEL
);
3850 *iter
->trace
= *tr
->current_trace
;
3852 if (!zalloc_cpumask_var(&iter
->started
, GFP_KERNEL
))
3857 #ifdef CONFIG_TRACER_MAX_TRACE
3858 /* Currently only the top directory has a snapshot */
3859 if (tr
->current_trace
->print_max
|| snapshot
)
3860 iter
->trace_buffer
= &tr
->max_buffer
;
3863 iter
->trace_buffer
= &tr
->trace_buffer
;
3864 iter
->snapshot
= snapshot
;
3866 iter
->cpu_file
= tracing_get_cpu(inode
);
3867 mutex_init(&iter
->mutex
);
3869 /* Notify the tracer early; before we stop tracing. */
3870 if (iter
->trace
&& iter
->trace
->open
)
3871 iter
->trace
->open(iter
);
3873 /* Annotate start of buffers if we had overruns */
3874 if (ring_buffer_overruns(iter
->trace_buffer
->buffer
))
3875 iter
->iter_flags
|= TRACE_FILE_ANNOTATE
;
3877 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3878 if (trace_clocks
[tr
->clock_id
].in_ns
)
3879 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
3881 /* stop the trace while dumping if we are not opening "snapshot" */
3882 if (!iter
->snapshot
)
3883 tracing_stop_tr(tr
);
3885 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
) {
3886 for_each_tracing_cpu(cpu
) {
3887 iter
->buffer_iter
[cpu
] =
3888 ring_buffer_read_prepare(iter
->trace_buffer
->buffer
, cpu
);
3890 ring_buffer_read_prepare_sync();
3891 for_each_tracing_cpu(cpu
) {
3892 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
3893 tracing_iter_reset(iter
, cpu
);
3896 cpu
= iter
->cpu_file
;
3897 iter
->buffer_iter
[cpu
] =
3898 ring_buffer_read_prepare(iter
->trace_buffer
->buffer
, cpu
);
3899 ring_buffer_read_prepare_sync();
3900 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
3901 tracing_iter_reset(iter
, cpu
);
3904 mutex_unlock(&trace_types_lock
);
3909 mutex_unlock(&trace_types_lock
);
3911 kfree(iter
->buffer_iter
);
3913 seq_release_private(inode
, file
);
3914 return ERR_PTR(-ENOMEM
);
3917 int tracing_open_generic(struct inode
*inode
, struct file
*filp
)
3919 if (tracing_disabled
)
3922 filp
->private_data
= inode
->i_private
;
3926 bool tracing_is_disabled(void)
3928 return (tracing_disabled
) ? true: false;
3932 * Open and update trace_array ref count.
3933 * Must have the current trace_array passed to it.
3935 static int tracing_open_generic_tr(struct inode
*inode
, struct file
*filp
)
3937 struct trace_array
*tr
= inode
->i_private
;
3939 if (tracing_disabled
)
3942 if (trace_array_get(tr
) < 0)
3945 filp
->private_data
= inode
->i_private
;
3950 static int tracing_release(struct inode
*inode
, struct file
*file
)
3952 struct trace_array
*tr
= inode
->i_private
;
3953 struct seq_file
*m
= file
->private_data
;
3954 struct trace_iterator
*iter
;
3957 if (!(file
->f_mode
& FMODE_READ
)) {
3958 trace_array_put(tr
);
3962 /* Writes do not use seq_file */
3964 mutex_lock(&trace_types_lock
);
3966 for_each_tracing_cpu(cpu
) {
3967 if (iter
->buffer_iter
[cpu
])
3968 ring_buffer_read_finish(iter
->buffer_iter
[cpu
]);
3971 if (iter
->trace
&& iter
->trace
->close
)
3972 iter
->trace
->close(iter
);
3974 if (!iter
->snapshot
)
3975 /* reenable tracing if it was previously enabled */
3976 tracing_start_tr(tr
);
3978 __trace_array_put(tr
);
3980 mutex_unlock(&trace_types_lock
);
3982 mutex_destroy(&iter
->mutex
);
3983 free_cpumask_var(iter
->started
);
3985 kfree(iter
->buffer_iter
);
3986 seq_release_private(inode
, file
);
3991 static int tracing_release_generic_tr(struct inode
*inode
, struct file
*file
)
3993 struct trace_array
*tr
= inode
->i_private
;
3995 trace_array_put(tr
);
3999 static int tracing_single_release_tr(struct inode
*inode
, struct file
*file
)
4001 struct trace_array
*tr
= inode
->i_private
;
4003 trace_array_put(tr
);
4005 return single_release(inode
, file
);
4008 static int tracing_open(struct inode
*inode
, struct file
*file
)
4010 struct trace_array
*tr
= inode
->i_private
;
4011 struct trace_iterator
*iter
;
4014 if (trace_array_get(tr
) < 0)
4017 /* If this file was open for write, then erase contents */
4018 if ((file
->f_mode
& FMODE_WRITE
) && (file
->f_flags
& O_TRUNC
)) {
4019 int cpu
= tracing_get_cpu(inode
);
4020 struct trace_buffer
*trace_buf
= &tr
->trace_buffer
;
4022 #ifdef CONFIG_TRACER_MAX_TRACE
4023 if (tr
->current_trace
->print_max
)
4024 trace_buf
= &tr
->max_buffer
;
4027 if (cpu
== RING_BUFFER_ALL_CPUS
)
4028 tracing_reset_online_cpus(trace_buf
);
4030 tracing_reset(trace_buf
, cpu
);
4033 if (file
->f_mode
& FMODE_READ
) {
4034 iter
= __tracing_open(inode
, file
, false);
4036 ret
= PTR_ERR(iter
);
4037 else if (tr
->trace_flags
& TRACE_ITER_LATENCY_FMT
)
4038 iter
->iter_flags
|= TRACE_FILE_LAT_FMT
;
4042 trace_array_put(tr
);
4048 * Some tracers are not suitable for instance buffers.
4049 * A tracer is always available for the global array (toplevel)
4050 * or if it explicitly states that it is.
4053 trace_ok_for_array(struct tracer
*t
, struct trace_array
*tr
)
4055 return (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) || t
->allow_instances
;
4058 /* Find the next tracer that this trace array may use */
4059 static struct tracer
*
4060 get_tracer_for_array(struct trace_array
*tr
, struct tracer
*t
)
4062 while (t
&& !trace_ok_for_array(t
, tr
))
4069 t_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
4071 struct trace_array
*tr
= m
->private;
4072 struct tracer
*t
= v
;
4077 t
= get_tracer_for_array(tr
, t
->next
);
4082 static void *t_start(struct seq_file
*m
, loff_t
*pos
)
4084 struct trace_array
*tr
= m
->private;
4088 mutex_lock(&trace_types_lock
);
4090 t
= get_tracer_for_array(tr
, trace_types
);
4091 for (; t
&& l
< *pos
; t
= t_next(m
, t
, &l
))
4097 static void t_stop(struct seq_file
*m
, void *p
)
4099 mutex_unlock(&trace_types_lock
);
4102 static int t_show(struct seq_file
*m
, void *v
)
4104 struct tracer
*t
= v
;
4109 seq_puts(m
, t
->name
);
4118 static const struct seq_operations show_traces_seq_ops
= {
4125 static int show_traces_open(struct inode
*inode
, struct file
*file
)
4127 struct trace_array
*tr
= inode
->i_private
;
4131 if (tracing_disabled
)
4134 ret
= seq_open(file
, &show_traces_seq_ops
);
4138 m
= file
->private_data
;
4145 tracing_write_stub(struct file
*filp
, const char __user
*ubuf
,
4146 size_t count
, loff_t
*ppos
)
4151 loff_t
tracing_lseek(struct file
*file
, loff_t offset
, int whence
)
4155 if (file
->f_mode
& FMODE_READ
)
4156 ret
= seq_lseek(file
, offset
, whence
);
4158 file
->f_pos
= ret
= 0;
4163 static const struct file_operations tracing_fops
= {
4164 .open
= tracing_open
,
4166 .write
= tracing_write_stub
,
4167 .llseek
= tracing_lseek
,
4168 .release
= tracing_release
,
4171 static const struct file_operations show_traces_fops
= {
4172 .open
= show_traces_open
,
4174 .release
= seq_release
,
4175 .llseek
= seq_lseek
,
4179 * The tracer itself will not take this lock, but still we want
4180 * to provide a consistent cpumask to user-space:
4182 static DEFINE_MUTEX(tracing_cpumask_update_lock
);
4185 * Temporary storage for the character representation of the
4186 * CPU bitmask (and one more byte for the newline):
4188 static char mask_str
[NR_CPUS
+ 1];
4191 tracing_cpumask_read(struct file
*filp
, char __user
*ubuf
,
4192 size_t count
, loff_t
*ppos
)
4194 struct trace_array
*tr
= file_inode(filp
)->i_private
;
4197 mutex_lock(&tracing_cpumask_update_lock
);
4199 len
= snprintf(mask_str
, count
, "%*pb\n",
4200 cpumask_pr_args(tr
->tracing_cpumask
));
4205 count
= simple_read_from_buffer(ubuf
, count
, ppos
, mask_str
, NR_CPUS
+1);
4208 mutex_unlock(&tracing_cpumask_update_lock
);
4214 tracing_cpumask_write(struct file
*filp
, const char __user
*ubuf
,
4215 size_t count
, loff_t
*ppos
)
4217 struct trace_array
*tr
= file_inode(filp
)->i_private
;
4218 cpumask_var_t tracing_cpumask_new
;
4221 if (!alloc_cpumask_var(&tracing_cpumask_new
, GFP_KERNEL
))
4224 err
= cpumask_parse_user(ubuf
, count
, tracing_cpumask_new
);
4228 mutex_lock(&tracing_cpumask_update_lock
);
4230 local_irq_disable();
4231 arch_spin_lock(&tr
->max_lock
);
4232 for_each_tracing_cpu(cpu
) {
4234 * Increase/decrease the disabled counter if we are
4235 * about to flip a bit in the cpumask:
4237 if (cpumask_test_cpu(cpu
, tr
->tracing_cpumask
) &&
4238 !cpumask_test_cpu(cpu
, tracing_cpumask_new
)) {
4239 atomic_inc(&per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->disabled
);
4240 ring_buffer_record_disable_cpu(tr
->trace_buffer
.buffer
, cpu
);
4242 if (!cpumask_test_cpu(cpu
, tr
->tracing_cpumask
) &&
4243 cpumask_test_cpu(cpu
, tracing_cpumask_new
)) {
4244 atomic_dec(&per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->disabled
);
4245 ring_buffer_record_enable_cpu(tr
->trace_buffer
.buffer
, cpu
);
4248 arch_spin_unlock(&tr
->max_lock
);
4251 cpumask_copy(tr
->tracing_cpumask
, tracing_cpumask_new
);
4253 mutex_unlock(&tracing_cpumask_update_lock
);
4254 free_cpumask_var(tracing_cpumask_new
);
4259 free_cpumask_var(tracing_cpumask_new
);
4264 static const struct file_operations tracing_cpumask_fops
= {
4265 .open
= tracing_open_generic_tr
,
4266 .read
= tracing_cpumask_read
,
4267 .write
= tracing_cpumask_write
,
4268 .release
= tracing_release_generic_tr
,
4269 .llseek
= generic_file_llseek
,
4272 static int tracing_trace_options_show(struct seq_file
*m
, void *v
)
4274 struct tracer_opt
*trace_opts
;
4275 struct trace_array
*tr
= m
->private;
4279 mutex_lock(&trace_types_lock
);
4280 tracer_flags
= tr
->current_trace
->flags
->val
;
4281 trace_opts
= tr
->current_trace
->flags
->opts
;
4283 for (i
= 0; trace_options
[i
]; i
++) {
4284 if (tr
->trace_flags
& (1 << i
))
4285 seq_printf(m
, "%s\n", trace_options
[i
]);
4287 seq_printf(m
, "no%s\n", trace_options
[i
]);
4290 for (i
= 0; trace_opts
[i
].name
; i
++) {
4291 if (tracer_flags
& trace_opts
[i
].bit
)
4292 seq_printf(m
, "%s\n", trace_opts
[i
].name
);
4294 seq_printf(m
, "no%s\n", trace_opts
[i
].name
);
4296 mutex_unlock(&trace_types_lock
);
4301 static int __set_tracer_option(struct trace_array
*tr
,
4302 struct tracer_flags
*tracer_flags
,
4303 struct tracer_opt
*opts
, int neg
)
4305 struct tracer
*trace
= tracer_flags
->trace
;
4308 ret
= trace
->set_flag(tr
, tracer_flags
->val
, opts
->bit
, !neg
);
4313 tracer_flags
->val
&= ~opts
->bit
;
4315 tracer_flags
->val
|= opts
->bit
;
4319 /* Try to assign a tracer specific option */
4320 static int set_tracer_option(struct trace_array
*tr
, char *cmp
, int neg
)
4322 struct tracer
*trace
= tr
->current_trace
;
4323 struct tracer_flags
*tracer_flags
= trace
->flags
;
4324 struct tracer_opt
*opts
= NULL
;
4327 for (i
= 0; tracer_flags
->opts
[i
].name
; i
++) {
4328 opts
= &tracer_flags
->opts
[i
];
4330 if (strcmp(cmp
, opts
->name
) == 0)
4331 return __set_tracer_option(tr
, trace
->flags
, opts
, neg
);
4337 /* Some tracers require overwrite to stay enabled */
4338 int trace_keep_overwrite(struct tracer
*tracer
, u32 mask
, int set
)
4340 if (tracer
->enabled
&& (mask
& TRACE_ITER_OVERWRITE
) && !set
)
4346 int set_tracer_flag(struct trace_array
*tr
, unsigned int mask
, int enabled
)
4348 /* do nothing if flag is already set */
4349 if (!!(tr
->trace_flags
& mask
) == !!enabled
)
4352 /* Give the tracer a chance to approve the change */
4353 if (tr
->current_trace
->flag_changed
)
4354 if (tr
->current_trace
->flag_changed(tr
, mask
, !!enabled
))
4358 tr
->trace_flags
|= mask
;
4360 tr
->trace_flags
&= ~mask
;
4362 if (mask
== TRACE_ITER_RECORD_CMD
)
4363 trace_event_enable_cmd_record(enabled
);
4365 if (mask
== TRACE_ITER_RECORD_TGID
) {
4367 tgid_map
= kzalloc((PID_MAX_DEFAULT
+ 1) * sizeof(*tgid_map
),
4370 tr
->trace_flags
&= ~TRACE_ITER_RECORD_TGID
;
4374 trace_event_enable_tgid_record(enabled
);
4377 if (mask
== TRACE_ITER_EVENT_FORK
)
4378 trace_event_follow_fork(tr
, enabled
);
4380 if (mask
== TRACE_ITER_FUNC_FORK
)
4381 ftrace_pid_follow_fork(tr
, enabled
);
4383 if (mask
== TRACE_ITER_OVERWRITE
) {
4384 ring_buffer_change_overwrite(tr
->trace_buffer
.buffer
, enabled
);
4385 #ifdef CONFIG_TRACER_MAX_TRACE
4386 ring_buffer_change_overwrite(tr
->max_buffer
.buffer
, enabled
);
4390 if (mask
== TRACE_ITER_PRINTK
) {
4391 trace_printk_start_stop_comm(enabled
);
4392 trace_printk_control(enabled
);
4398 static int trace_set_options(struct trace_array
*tr
, char *option
)
4404 size_t orig_len
= strlen(option
);
4406 cmp
= strstrip(option
);
4408 if (strncmp(cmp
, "no", 2) == 0) {
4413 mutex_lock(&trace_types_lock
);
4415 for (i
= 0; trace_options
[i
]; i
++) {
4416 if (strcmp(cmp
, trace_options
[i
]) == 0) {
4417 ret
= set_tracer_flag(tr
, 1 << i
, !neg
);
4422 /* If no option could be set, test the specific tracer options */
4423 if (!trace_options
[i
])
4424 ret
= set_tracer_option(tr
, cmp
, neg
);
4426 mutex_unlock(&trace_types_lock
);
4429 * If the first trailing whitespace is replaced with '\0' by strstrip,
4430 * turn it back into a space.
4432 if (orig_len
> strlen(option
))
4433 option
[strlen(option
)] = ' ';
4438 static void __init
apply_trace_boot_options(void)
4440 char *buf
= trace_boot_options_buf
;
4444 option
= strsep(&buf
, ",");
4450 trace_set_options(&global_trace
, option
);
4452 /* Put back the comma to allow this to be called again */
4459 tracing_trace_options_write(struct file
*filp
, const char __user
*ubuf
,
4460 size_t cnt
, loff_t
*ppos
)
4462 struct seq_file
*m
= filp
->private_data
;
4463 struct trace_array
*tr
= m
->private;
4467 if (cnt
>= sizeof(buf
))
4470 if (copy_from_user(buf
, ubuf
, cnt
))
4475 ret
= trace_set_options(tr
, buf
);
4484 static int tracing_trace_options_open(struct inode
*inode
, struct file
*file
)
4486 struct trace_array
*tr
= inode
->i_private
;
4489 if (tracing_disabled
)
4492 if (trace_array_get(tr
) < 0)
4495 ret
= single_open(file
, tracing_trace_options_show
, inode
->i_private
);
4497 trace_array_put(tr
);
4502 static const struct file_operations tracing_iter_fops
= {
4503 .open
= tracing_trace_options_open
,
4505 .llseek
= seq_lseek
,
4506 .release
= tracing_single_release_tr
,
4507 .write
= tracing_trace_options_write
,
4510 static const char readme_msg
[] =
4511 "tracing mini-HOWTO:\n\n"
4512 "# echo 0 > tracing_on : quick way to disable tracing\n"
4513 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4514 " Important files:\n"
4515 " trace\t\t\t- The static contents of the buffer\n"
4516 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4517 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4518 " current_tracer\t- function and latency tracers\n"
4519 " available_tracers\t- list of configured tracers for current_tracer\n"
4520 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4521 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4522 " trace_clock\t\t-change the clock used to order events\n"
4523 " local: Per cpu clock but may not be synced across CPUs\n"
4524 " global: Synced across CPUs but slows tracing down.\n"
4525 " counter: Not a clock, but just an increment\n"
4526 " uptime: Jiffy counter from time of boot\n"
4527 " perf: Same clock that perf events use\n"
4528 #ifdef CONFIG_X86_64
4529 " x86-tsc: TSC cycle counter\n"
4531 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4532 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4533 " tracing_cpumask\t- Limit which CPUs to trace\n"
4534 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4535 "\t\t\t Remove sub-buffer with rmdir\n"
4536 " trace_options\t\t- Set format or modify how tracing happens\n"
4537 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4538 "\t\t\t option name\n"
4539 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4540 #ifdef CONFIG_DYNAMIC_FTRACE
4541 "\n available_filter_functions - list of functions that can be filtered on\n"
4542 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4543 "\t\t\t functions\n"
4544 "\t accepts: func_full_name or glob-matching-pattern\n"
4545 "\t modules: Can select a group via module\n"
4546 "\t Format: :mod:<module-name>\n"
4547 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4548 "\t triggers: a command to perform when function is hit\n"
4549 "\t Format: <function>:<trigger>[:count]\n"
4550 "\t trigger: traceon, traceoff\n"
4551 "\t\t enable_event:<system>:<event>\n"
4552 "\t\t disable_event:<system>:<event>\n"
4553 #ifdef CONFIG_STACKTRACE
4556 #ifdef CONFIG_TRACER_SNAPSHOT
4561 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4562 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4563 "\t The first one will disable tracing every time do_fault is hit\n"
4564 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4565 "\t The first time do trap is hit and it disables tracing, the\n"
4566 "\t counter will decrement to 2. If tracing is already disabled,\n"
4567 "\t the counter will not decrement. It only decrements when the\n"
4568 "\t trigger did work\n"
4569 "\t To remove trigger without count:\n"
4570 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4571 "\t To remove trigger with a count:\n"
4572 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4573 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4574 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4575 "\t modules: Can select a group via module command :mod:\n"
4576 "\t Does not accept triggers\n"
4577 #endif /* CONFIG_DYNAMIC_FTRACE */
4578 #ifdef CONFIG_FUNCTION_TRACER
4579 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4582 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4583 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4584 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4585 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4587 #ifdef CONFIG_TRACER_SNAPSHOT
4588 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4589 "\t\t\t snapshot buffer. Read the contents for more\n"
	"\t\t\t information\n"
#ifdef CONFIG_STACK_TRACER
	"  stack_trace\t\t- Shows the max stack trace when active\n"
	"  stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t Write into this file to reset the max size (trigger a\n"
	"\t\t\t new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
#endif /* CONFIG_STACK_TRACER */
#ifdef CONFIG_KPROBE_EVENTS
	"  kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
	"\t\t\t Write into this file to define/undefine new trace events.\n"
#ifdef CONFIG_UPROBE_EVENTS
	"  uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
	"\t\t\t Write into this file to define/undefine new trace events.\n"
#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
	"\t  accepts: event-definitions (one definition per line)\n"
	"\t   Format: p[:[<group>/]<event>] <place> [<args>]\n"
	"\t           r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
	"\t           -:[<group>/]<event>\n"
#ifdef CONFIG_KPROBE_EVENTS
	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
	"place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
#ifdef CONFIG_UPROBE_EVENTS
	"\t    place: <path>:<offset>\n"
	"\t     args: <name>=fetcharg[:type]\n"
	"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
	"\t           $stack<index>, $stack, $retval, $comm\n"
	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
	"\t           b<bit-width>@<bit-offset>/<container-size>\n"
	"  events/\t\t- Directory containing all trace event subsystems:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"  events/<system>/<event>/\t- Directory containing control files for\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"      trigger\t\t- If set, a command to perform when event is hit\n"
	"\t    Format: <trigger>[:count][if <filter>]\n"
	"\t   trigger: traceon, traceoff\n"
	"\t            enable_event:<system>:<event>\n"
	"\t            disable_event:<system>:<event>\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t            enable_hist:<system>:<event>\n"
	"\t            disable_hist:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
#ifdef CONFIG_TRACER_SNAPSHOT
#ifdef CONFIG_HIST_TRIGGERS
	"\t\t    hist (see below)\n"
	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t                  events/block/block_unplug/trigger\n"
	"\t   The first disables tracing every time block_unplug is hit.\n"
	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t   Like function triggers, the counter is only decremented if it\n"
	"\t    enabled or disabled tracing.\n"
	"\t   To remove a trigger without a count:\n"
	"\t     echo '!<trigger> > <system>/<event>/trigger\n"
	"\t   To remove a trigger with a count:\n"
	"\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
	"\t   Filters can be ignored when removing a trigger.\n"
#ifdef CONFIG_HIST_TRIGGERS
	"  hist trigger\t- If set, event hits are aggregated into a hash table\n"
	"\t    Format: hist:keys=<field1[,field2,...]>\n"
	"\t            [:values=<field1[,field2,...]>]\n"
	"\t            [:sort=<field1[,field2,...]>]\n"
	"\t            [:size=#entries]\n"
	"\t            [:pause][:continue][:clear]\n"
	"\t            [:name=histname1]\n"
	"\t            [if <filter>]\n\n"
	"\t When a matching event is hit, an entry is added to a hash\n"
	"\t table using the key(s) and value(s) named, and the value of a\n"
	"\t sum called 'hitcount' is incremented. Keys and values\n"
	"\t correspond to fields in the event's format description. Keys\n"
	"\t can be any field, or the special string 'stacktrace'.\n"
	"\t Compound keys consisting of up to two fields can be specified\n"
	"\t by the 'keys' keyword. Values must correspond to numeric\n"
	"\t fields. Sort keys consisting of up to two fields can be\n"
	"\t specified using the 'sort' keyword. The sort direction can\n"
	"\t be modified by appending '.descending' or '.ascending' to a\n"
	"\t sort field. The 'size' parameter can be used to specify more\n"
	"\t or fewer than the default 2048 entries for the hashtable size.\n"
	"\t If a hist trigger is given a name using the 'name' parameter,\n"
	"\t its histogram data will be shared with other triggers of the\n"
	"\t same name, and trigger hits will update this common data.\n\n"
	"\t Reading the 'hist' file for the event will dump the hash\n"
	"\t table in its entirety to stdout. If there are multiple hist\n"
	"\t triggers attached to an event, there will be a table for each\n"
	"\t trigger in the output. The table displayed for a named\n"
	"\t trigger will be the same as any other instance having the\n"
	"\t same name. The default format used to display a given field\n"
	"\t can be modified by appending any of the following modifiers\n"
	"\t to the field name, as applicable:\n\n"
	"\t .hex        display a number as a hex value\n"
	"\t .sym        display an address as a symbol\n"
	"\t .sym-offset display an address as a symbol and offset\n"
	"\t .execname   display a common_pid as a program name\n"
	"\t .syscall    display a syscall id as a syscall name\n\n"
	"\t .log2       display log2 value rather than raw number\n\n"
	"\t The 'pause' parameter can be used to pause an existing hist\n"
	"\t trigger or to start a hist trigger but not log any events\n"
	"\t until told to do so. 'continue' can be used to start or\n"
	"\t restart a paused hist trigger.\n\n"
	"\t The 'clear' parameter will clear the contents of a running\n"
	"\t hist trigger and leave its current paused/active state\n"
	"\t The enable_hist and disable_hist triggers can be used to\n"
	"\t have one event conditionally start and stop another event's\n"
	"\t already-attached hist trigger. The syntax is analogous to\n"
	"\t the enable_event and disable_event triggers.\n"
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
	if (*pos || m->count)
	for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
		if (trace_find_tgid(*ptr))

static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
	v = saved_tgids_next(m, v, &l);

static void saved_tgids_stop(struct seq_file *m, void *v)

static int saved_tgids_show(struct seq_file *m, void *v)
	int pid = (int *)v - tgid_map;
	seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));

static const struct seq_operations tracing_saved_tgids_seq_ops = {
	.start		= saved_tgids_start,
	.stop		= saved_tgids_stop,
	.next		= saved_tgids_next,
	.show		= saved_tgids_show,

static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
	if (tracing_disabled)
	return seq_open(filp, &tracing_saved_tgids_seq_ops);

static const struct file_operations tracing_saved_tgids_fops = {
	.open		= tracing_saved_tgids_open,
	.llseek		= seq_lseek,
	.release	= seq_release,
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
	unsigned int *ptr = v;
	if (*pos || m->count)
	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
	arch_spin_lock(&trace_cmdline_lock);
	v = &savedcmd->map_cmdline_to_pid[0];
	v = saved_cmdlines_next(m, v, &l);

static void saved_cmdlines_stop(struct seq_file *m, void *v)
	arch_spin_unlock(&trace_cmdline_lock);

static int saved_cmdlines_show(struct seq_file *m, void *v)
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
	if (tracing_disabled)
	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.llseek		= seq_lseek,
	.release	= seq_release,
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);

static int tracing_resize_saved_cmdlines(unsigned int val)
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (allocate_cmdlines_buffer(val, s) < 0) {

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);

	/* must have at least 1 entry or less than PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)

	ret = tracing_resize_saved_cmdlines((unsigned int)val);

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
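
/*
 * Illustrative usage sketch for the saved_cmdlines_size file handled above
 * (assumes tracefs mounted at /sys/kernel/debug/tracing):
 *
 *   # cat saved_cmdlines_size           shows savedcmd->cmdline_num
 *   # echo 1024 > saved_cmdlines_size   cache up to 1024 pid->comm mappings
 *
 * The write handler above rejects 0 and values above PID_MAX_DEFAULT.
 */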
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static union trace_eval_map_item *
update_eval_map(union trace_eval_map_item *ptr)
	if (!ptr->map.eval_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */

static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
	union trace_eval_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	ptr = update_eval_map(ptr);
	if (WARN_ON_ONCE(!ptr))

	ptr = update_eval_map(ptr);

static void *eval_map_start(struct seq_file *m, loff_t *pos)
	union trace_eval_map_item *v;

	mutex_lock(&trace_eval_mutex);

	v = trace_eval_maps;

	while (v && l < *pos) {
		v = eval_map_next(m, v, &l);

static void eval_map_stop(struct seq_file *m, void *v)
	mutex_unlock(&trace_eval_mutex);

static int eval_map_show(struct seq_file *m, void *v)
	union trace_eval_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.eval_string, ptr->map.eval_value,

static const struct seq_operations tracing_eval_map_seq_ops = {
	.start		= eval_map_start,
	.next		= eval_map_next,
	.stop		= eval_map_stop,
	.show		= eval_map_show,

static int tracing_eval_map_open(struct inode *inode, struct file *filp)
	if (tracing_disabled)
	return seq_open(filp, &tracing_eval_map_seq_ops);

static const struct file_operations tracing_eval_map_fops = {
	.open		= tracing_eval_map_open,
	.llseek		= seq_lseek,
	.release	= seq_release,
static inline union trace_eval_map_item *
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;

trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
	struct trace_eval_map **stop;
	struct trace_eval_map **map;
	union trace_eval_map_item *map_array;
	union trace_eval_map_item *ptr;

	/*
	 * The trace_eval_maps contains the map plus a head and tail item,
	 * where the head holds the module and length of array, and the
	 * tail holds a pointer to the next list.
	 */
	map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
		pr_warn("Unable to allocate trace eval mapping\n");

	mutex_lock(&trace_eval_mutex);

	if (!trace_eval_maps)
		trace_eval_maps = map_array;
		ptr = trace_eval_maps;
			ptr = trace_eval_jmp_to_tail(ptr);
			if (!ptr->tail.next)
			ptr = ptr->tail.next;
		ptr->tail.next = map_array;

	map_array->head.mod = mod;
	map_array->head.length = len;
	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_eval_mutex);
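
/*
 * Illustrative layout of the map_array built above, assuming the module
 * supplies len maps:
 *
 *   map_array[0]        head: { .mod = mod, .length = len }
 *   map_array[1..len]   one trace_eval_map copied from each **map
 *   map_array[len + 1]  zeroed tail; its tail.next later links the next
 *                       module's block onto the trace_eval_maps list
 */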
static void trace_create_eval_file(struct dentry *d_tracer)
	trace_create_file("eval_map", 0444, d_tracer,
			  NULL, &tracing_eval_map_fops);

#else /* CONFIG_TRACE_EVAL_MAP_FILE */
static inline void trace_create_eval_file(struct dentry *d_tracer) { }
static inline void trace_insert_eval_map_file(struct module *mod,
			      struct trace_eval_map **start, int len) { }
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */

static void trace_insert_eval_map(struct module *mod,
				  struct trace_eval_map **start, int len)
	struct trace_eval_map **map;

	trace_event_eval_update(map, len);

	trace_insert_eval_map_file(mod, start, len);
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

int tracer_init(struct tracer *t, struct trace_array *tr)
	tracing_reset_online_cpus(&tr->trace_buffer);

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		ret = ring_buffer_resize(trace_buf->buffer,
			per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
#endif /* CONFIG_TRACER_MAX_TRACE */
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
			/*
			 * AARGH! We are left with a different size max buffer!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 */
			tracing_disabled = 1;

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
static ssize_t
tracing_resize_ring_buffer(struct trace_array *tr,
			   unsigned long size, int cpu_id)
	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);

	mutex_unlock(&trace_types_lock);

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system that has it
 * configured in, the ring buffers are set to a minimum size. Once
 * a user starts to use the tracing facility, the buffers need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						   RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);
struct trace_option_dentry;

create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
	if (tr->current_trace == &nop_trace)

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
	/* Only enable if the directory has been created already. */
	create_trace_option_files(tr, t);
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
#ifdef CONFIG_TRACER_MAX_TRACE
	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						   RING_BUFFER_ALL_CPUS);

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
	if (t == tr->current_trace)

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * update_max_tr is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);

	ret = tracer_init(t, tr);

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);

	mutex_unlock(&trace_types_lock);
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(buf, ubuf, cnt))

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)

	err = tracing_set_tracer(tr, buf);
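
/*
 * Illustrative usage sketch for the current_tracer interface implemented by
 * the two functions above (assumes tracefs at /sys/kernel/debug/tracing):
 *
 *   # cat current_tracer             prints tr->current_trace->name
 *   # echo function > current_tracer
 *   # echo nop > current_tracer      switch back to the no-op tracer
 *
 * Trailing whitespace from echo is stripped before the tracer lookup.
 */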
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);

tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);

tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
	struct trace_array *tr = filp->private_data;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);

	mutex_unlock(&trace_types_lock);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)

tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);

tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
static int tracing_open_pipe(struct inode *inode, struct file *filp)
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;

	if (tracing_disabled)

	if (trace_array_get(tr) < 0)

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		__trace_array_put(tr);

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);

static int tracing_release_pipe(struct inode *inode, struct file *file)
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);

	trace_array_put(tr);
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,

tracing_poll_pipe(struct file *filp, poll_table *poll_table)
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
	struct trace_iterator *iter = filp->private_data;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_tracing_is_on(iter->tr) && iter->pos)

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, false);

		mutex_lock(&iter->mutex);
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
	struct trace_iterator *iter = filp->private_data;

	/*
	 * Avoid more than one consumer on a single file descriptor
	 * This is just a matter of traces coherency, the ring buffer itself
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);

	trace_seq_init(&iter->seq);

	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);

	sret = tracing_wait_pipe(filp);

	/* stop when tracing is finished */
	if (trace_empty(iter)) {

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */

	mutex_unlock(&iter->mutex);
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
	__free_page(spd->pages[idx]);

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,

tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
	/* Seq buffer is page-sized, exactly what we need. */
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;

		count = trace_seq_used(&iter->seq) - save_len;
			iter->seq.seq.len = save_len;

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (!trace_find_next_entry_inc(iter)) {
static ssize_t tracing_splice_read_pipe(struct file *filp,
					struct pipe_inode_info *pipe,
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,

	if (splice_grow_spd(pipe, &spd))

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);

	ret = tracing_wait_pipe(filp);

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
			__free_page(spd.pages[i]);

		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	ret = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);

	mutex_unlock(&iter->mutex);
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;

		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    trace_buf_size >> 10);
				r = sprintf(buf, "%lu\n", size >> 10);
			r = sprintf(buf, "X\n");
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);

	/* must have at least 1 entry */

	/* value is in KB */
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
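
/*
 * Illustrative usage sketch for the buffer_size_kb / buffer_total_size_kb
 * files handled here (values are interpreted in KiB):
 *
 *   # echo 4096 > buffer_size_kb               4 MiB per online CPU
 *   # echo 128 > per_cpu/cpu1/buffer_size_kb   resize only CPU 1
 *   # cat buffer_total_size_kb                 sum over all CPUs
 */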
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
	struct trace_array *tr = filp->private_data;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

tracing_free_buffer_release(struct inode *inode, struct file *filp)
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	const char faulted[] = "<faulted>";

/* Used in tracing_mark_raw_write() as well */
#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */

	if (tracing_disabled)

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */

	/* If less than "<faulted>", then make sure we can still add that */
	if (cnt < FAULTED_SIZE)
		size += FAULTED_SIZE - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    irq_flags, preempt_count());
	if (unlikely(!event))
		/* Ring buffer disabled, return as if not open for write */

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
		memcpy(&entry->buf, faulted, FAULTED_SIZE);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);
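
/*
 * Illustrative usage sketch for the trace_marker file implemented above:
 *
 *   # echo "hit the slow path" > trace_marker
 *
 * The string is recorded as a TRACE_PRINT event; if the user page faults
 * during the atomic copy, the literal "<faulted>" is recorded instead.
 */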
/* Limit it for now to 3K (including tag) */
#define RAW_DATA_MAX_SIZE (1024*3)

tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *fpos)
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct raw_data_entry *entry;
	const char faulted[] = "<faulted>";
	unsigned long irq_flags;

#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))

	if (tracing_disabled)

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))

	/* The marker must at least have a tag id */
	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt;
	if (cnt < FAULT_SIZE_ID)
		size += FAULT_SIZE_ID - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
					    irq_flags, preempt_count());
		/* Ring buffer disabled, return as if not open for write */

	entry = ring_buffer_event_data(event);

	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
		memcpy(&entry->buf, faulted, FAULTED_SIZE);

	__buffer_unlock_commit(buffer, event);
static int tracing_clock_show(struct seq_file *m, void *v)
	struct trace_array *tr = m->private;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
			   "%s%s%s%s", i ? " " : "",
			   i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			   i == tr->clock_id ? "]" : "");

static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
	if (i == ARRAY_SIZE(trace_clocks))

	mutex_lock(&trace_types_lock);

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);

	mutex_unlock(&trace_types_lock);

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	const char *clockstr;

	if (cnt >= sizeof(buf))

	if (copy_from_user(buf, ubuf, cnt))

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);

static int tracing_clock_open(struct inode *inode, struct file *file)
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)

	if (trace_array_get(tr))

	ret = single_open(file, tracing_clock_show, inode->i_private);
		trace_array_put(tr);
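
/*
 * Illustrative usage sketch for the trace_clock file wired up above:
 *
 *   # cat trace_clock            current selection shown in [brackets]
 *   # echo mono > trace_clock
 *
 * Switching clocks resets the buffers, since timestamps taken with the old
 * clock are not comparable with the new one.
 */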
struct ftrace_buffer_info {
	struct trace_iterator	iter;
	unsigned int		spare_cpu;

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;

	if (trace_array_get(tr) < 0)

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
			ret = PTR_ERR(iter);
		/* Writes still need the seq_file to hold the private data */
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		file->private_data = m;

		trace_array_put(tr);

tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	ret = tracing_update_buffers();

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {

		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		if (tr->allocated_snapshot)

/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
			update_max_tr_single(tr, current, iter->cpu_file);
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
				tracing_reset(&tr->max_buffer, iter->cpu_file);

	mutex_unlock(&trace_types_lock);
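
/*
 * Illustrative usage sketch for the snapshot file whose write handler is
 * above (the 0/1/2 semantics documented in Documentation/trace/ftrace.txt):
 *
 *   # echo 1 > snapshot    allocate (if needed) and take a snapshot
 *   # cat snapshot         read the snapshotted max buffer
 *   # echo 0 > snapshot    free the snapshot buffer
 *   # echo 2 > snapshot    clear the snapshot contents, keep it allocated
 */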
static int tracing_snapshot_release(struct inode *inode, struct file *file)
	struct seq_file *m = file->private_data;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)

	/* If write only, the seq_file is just a stub */

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
	struct ftrace_buffer_info *info;

	ret = tracing_buffers_open(inode, filp);

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

#endif /* CONFIG_TRACER_SNAPSHOT */
static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,

static const struct file_operations tracing_mark_raw_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_raw_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,

#endif /* CONFIG_TRACER_SNAPSHOT */
static int tracing_buffers_open(struct inode *inode, struct file *filp)
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;

	if (tracing_disabled)

	if (trace_array_get(tr) < 0)

	info = kzalloc(sizeof(*info), GFP_KERNEL);
		trace_array_put(tr);

	mutex_lock(&trace_types_lock);

	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;

	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
		trace_array_put(tr);

tracing_buffers_poll(struct file *filp, poll_table *poll_table)
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
*filp
, char __user
*ubuf
,
6609 size_t count
, loff_t
*ppos
)
6611 struct ftrace_buffer_info
*info
= filp
->private_data
;
6612 struct trace_iterator
*iter
= &info
->iter
;
6619 #ifdef CONFIG_TRACER_MAX_TRACE
6620 if (iter
->snapshot
&& iter
->tr
->current_trace
->use_max_tr
)
6625 info
->spare
= ring_buffer_alloc_read_page(iter
->trace_buffer
->buffer
,
6627 if (IS_ERR(info
->spare
)) {
6628 ret
= PTR_ERR(info
->spare
);
6631 info
->spare_cpu
= iter
->cpu_file
;
6637 /* Do we have previous read data to read? */
6638 if (info
->read
< PAGE_SIZE
)
6642 trace_access_lock(iter
->cpu_file
);
6643 ret
= ring_buffer_read_page(iter
->trace_buffer
->buffer
,
6647 trace_access_unlock(iter
->cpu_file
);
6650 if (trace_empty(iter
)) {
6651 if ((filp
->f_flags
& O_NONBLOCK
))
6654 ret
= wait_on_pipe(iter
, false);
6665 size
= PAGE_SIZE
- info
->read
;
6669 ret
= copy_to_user(ubuf
, info
->spare
+ info
->read
, size
);
6681 static int tracing_buffers_release(struct inode
*inode
, struct file
*file
)
6683 struct ftrace_buffer_info
*info
= file
->private_data
;
6684 struct trace_iterator
*iter
= &info
->iter
;
6686 mutex_lock(&trace_types_lock
);
6688 iter
->tr
->current_trace
->ref
--;
6690 __trace_array_put(iter
->tr
);
6693 ring_buffer_free_read_page(iter
->trace_buffer
->buffer
,
6694 info
->spare_cpu
, info
->spare
);
6697 mutex_unlock(&trace_types_lock
);
	struct ring_buffer	*buffer;

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);

	spd->partial[i].private = 0;
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	struct buffer_ref *ref;
	int entries, size, i;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)

	if (*ppos & (PAGE_SIZE - 1))

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)

	if (splice_grow_spd(pipe, &spd))

	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);

		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (IS_ERR(ref->page)) {
			ret = PTR_ERR(ref->page);
		ref->cpu = iter->cpu_file;

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
			ring_buffer_free_read_page(ref->buffer, ref->cpu,

		/*
		 * zero out any left over data, this is going to
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	trace_access_unlock(iter->cpu_file);

	/* did we read anything? */
	if (!spd.nr_pages) {

		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))

		ret = wait_on_pipe(iter, true);

	ret = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
#ifdef CONFIG_DYNAMIC_FTRACE

tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
	unsigned long *p = filp->private_data;
	char buf[64]; /* Not too big for a shallow stack */

	r = scnprintf(buf, 63, "%ld", *p);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
#endif /* CONFIG_DYNAMIC_FTRACE */
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)

ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
	tracing_snapshot_instance(tr);

ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
	struct ftrace_func_mapper *mapper = data;

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	tracing_snapshot_instance(tr);

ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
	struct ftrace_func_mapper *mapper = data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

		seq_printf(m, ":count=%ld\n", *count);
		seq_puts(m, ":unlimited\n");

ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *init_data, void **data)
	struct ftrace_func_mapper *mapper = *data;

		mapper = allocate_ftrace_func_mapper();

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);

ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *data)
	struct ftrace_func_mapper *mapper = data;

		free_ftrace_func_mapper(mapper, NULL);

	ftrace_func_mapper_remove_ip(mapper, ip);

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
	.init			= ftrace_snapshot_init,
	.free			= ftrace_snapshot_free,

ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;

	/* hash funcs only work with set_ftrace_filter */

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	number = strsep(&param, ":");

	if (!strlen(number))

	/*
	 * We use the callback data field (which is a pointer)
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);

	ret = alloc_snapshot(tr);

	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.func		= ftrace_trace_snapshot_callback,

static __init int register_snapshot_cmd(void)
	return register_ftrace_command(&ftrace_snapshot_cmd);

static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
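
/*
 * Illustrative usage sketch for the "snapshot" function command registered
 * above (requires CONFIG_DYNAMIC_FTRACE; written to set_ftrace_filter):
 *
 *   # echo 'schedule:snapshot' > set_ftrace_filter      snapshot on every hit
 *   # echo 'schedule:snapshot:1' > set_ftrace_filter    only on the first hit
 *   # echo '!schedule:snapshot:0' >> set_ftrace_filter  remove the counted probe
 */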
static struct dentry *tracing_get_dentry(struct trace_array *tr)
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)

	/* All sub buffers have a descriptor */

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
	struct dentry *d_tracer;

		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);

tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct trace_option_dentry *topt = filp->private_data;

	if (topt->flags->val & topt->opt->bit)

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct trace_option_dentry *topt = filp->private_data;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);

	if (val != 0 && val != 1)

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
		mutex_unlock(&trace_types_lock);

static const struct file_operations trace_options_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 * As the pointer itself contains the address of the index (remember
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 * ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
*filp
, char __user
*ubuf
, size_t cnt
,
7314 void *tr_index
= filp
->private_data
;
7315 struct trace_array
*tr
;
7319 get_tr_index(tr_index
, &tr
, &index
);
7321 if (tr
->trace_flags
& (1 << index
))
7326 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, 2);
7330 trace_options_core_write(struct file
*filp
, const char __user
*ubuf
, size_t cnt
,
7333 void *tr_index
= filp
->private_data
;
7334 struct trace_array
*tr
;
7339 get_tr_index(tr_index
, &tr
, &index
);
7341 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
7345 if (val
!= 0 && val
!= 1)
7348 mutex_lock(&trace_types_lock
);
7349 ret
= set_tracer_flag(tr
, 1 << index
, val
);
7350 mutex_unlock(&trace_types_lock
);
7360 static const struct file_operations trace_options_core_fops
= {
7361 .open
= tracing_open_generic
,
7362 .read
= trace_options_core_read
,
7363 .write
= trace_options_core_write
,
7364 .llseek
= generic_file_llseek
,
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}
static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there's no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}
static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic_tr,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};
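
/*
 * These fops back the per-instance "tracing_on" file created in
 * init_tracer_tracefs() below. Writing 0 or 1 (e.g. "echo 0 > tracing_on")
 * stops or restarts recording into the ring buffer and invokes the current
 * tracer's stop()/start() callbacks, without freeing the buffer itself.
 */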
struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (ret) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}
static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}
static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	ftrace_init_trace_array(tr);

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
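
/*
 * Each instance created above gets its own ring buffers, event directory,
 * tracefs files and option flags (seeded from the global flags with
 * ZEROED_TRACE_FLAGS cleared), all independent of the top-level trace array.
 */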
static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	clear_ftrace_function_probes(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}
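
/*
 * tracefs turns mkdir/rmdir inside the "instances" directory into calls to
 * the two callbacks registered above. For example, assuming tracefs is
 * mounted at /sys/kernel/tracing:
 *
 *   mkdir /sys/kernel/tracing/instances/foo   -> instance_mkdir("foo")
 *   rmdir /sys/kernel/tracing/instances/foo   -> instance_rmdir("foo")
 */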
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}
static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}
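
/*
 * In practice this keeps legacy paths working: the first lookup of the
 * automounted "tracing" dentry (set up in tracing_init_dentry() below), e.g.
 * from "cat /sys/kernel/debug/tracing/trace", calls trace_automount() and
 * grafts a fresh tracefs mount onto that directory.
 */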
/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}
extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static void __init trace_eval_init(void)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}
#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
};
#endif /* CONFIG_MODULES */
static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			NULL, &tracing_saved_tgids_fops);

	trace_eval_init();

	trace_create_eval_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
};

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero terminated, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill in all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
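
/*
 * Note: besides direct callers, ftrace_dump() is reached from the panic and
 * die notifiers above whenever ftrace_dump_on_oops selects a dump mode, and
 * it is exported so modules can trigger a dump as well. All output goes
 * through trace_printk_seq(), i.e. printk() at KERN_TRACE (KERN_EMERG) level.
 */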
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocate some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}
void __init trace_init(void)
{
	trace_event_init();
}
__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);