// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing including tracers/events via kernel cmdline
 * is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
void __init disable_tracing_selftest(const char *reason)
{
	if (!tracing_selftest_disabled) {
		tracing_selftest_disabled = true;
		pr_info("Ftrace startup test is disabled due to %s\n", reason);
	}
}
#endif
/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */
enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
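
/*
 * Illustrative sketch (not part of this file): one way to walk the
 * saved eval map layout described above - a head item, "length" map
 * items, then a tail item whose next pointer chains to the next saved
 * array. print_one_map() is a made-up helper used only for the example;
 * the real iteration lives in the eval_map seq_file code.
 *
 *	static void walk_eval_maps(union trace_eval_map_item *item)
 *	{
 *		while (item) {
 *			unsigned long len = item->head.length;
 *			union trace_eval_map_item *map = item + 1;
 *			unsigned long i;
 *
 *			for (i = 0; i < len; i++)
 *				print_one_map(&map[i].map);
 *
 *			item = map[len].tail.next;
 *		}
 *	}
 */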
int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned long flags, int pc);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 1;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 1;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);
unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event, int flag)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	if (export->flags & flag) {
		entry = ring_buffer_event_data(event);
		size = ring_buffer_event_length(event);
		export->write(export, entry, size);
	}
}
static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_inc(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_inc(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_dec(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_dec(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_dec(&trace_marker_exports_enabled);
}
static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event, flag);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable(export);

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable(export);

	return ret;
}
int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);
int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |		\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr : pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 *
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}
/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}
/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */
	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		 trace_find_filtered_pid(filtered_no_pids, task->pid));
}
/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}
/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}
/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127
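
/*
 * Illustrative sketch (not part of this file): the three pid helpers
 * above are meant to be dropped into a seq_file. "my_pid_list" and the
 * start/next/stop wrappers are invented names; the event and ftrace pid
 * files wire them up in a similar (but locking-aware) way.
 *
 *	static void *my_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *my_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static void my_stop(struct seq_file *m, void *v)
 *	{
 *	}
 *
 *	static const struct seq_operations my_pid_seq_ops = {
 *		.start	= my_start,
 *		.next	= my_next,
 *		.stop	= my_stop,
 *		.show	= trace_pid_show,
 *	};
 */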
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which returns by ring_buffer_peek() ..etc)
 * are not protected by ring buffer.
 *
 * The content of events may become garbage if we allow another process to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
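
/*
 * Illustrative sketch (not part of this file): reader paths pair the
 * helpers above around any consuming access to a cpu buffer, roughly
 * like below. tracing_read_one_event() is a made-up stand-in for what
 * the read/splice paths actually do.
 *
 *	trace_access_lock(cpu);
 *	event = tracing_read_one_event(iter, cpu);
 *	trace_access_unlock(cpu);
 */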
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, flags, pc);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}
void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int ret = 0;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}
static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr:		The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#endif /* CONFIG_TRACER_SNAPSHOT */
void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_off(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning) {
		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
			"Disabling tracing due to warning\n");
		tracing_off();
	}
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		return ring_buffer_record_is_on(tr->array_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}
/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};
bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
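
/*
 * Illustrative sketch (not part of this file): a write handler can loop
 * on trace_get_user() to pull space-separated tokens out of a user
 * buffer; this mirrors what trace_pid_write() above does. handle_token()
 * is a made-up callback.
 *
 *	struct trace_parser parser;
 *	loff_t pos = 0;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, 64))
 *		return -ENOMEM;
 *
 *	while (cnt > 0) {
 *		read = trace_get_user(&parser, ubuf, cnt, &pos);
 *		if (read <= 0 || !trace_parser_loaded(&parser))
 *			break;
 *		handle_token(parser.buffer);
 *		trace_parser_clear(&parser);
 *		ubuf += read;
 *		cnt -= read;
 *	}
 *
 *	trace_parser_put(&parser);
 */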
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;
static const struct file_operations tracing_max_lat_fops;

#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
	defined(CONFIG_FSNOTIFY)

static struct workqueue_struct *fsnotify_wq;
static void latency_fsnotify_workfn(struct work_struct *work)
{
	struct trace_array *tr = container_of(work, struct trace_array,
					      fsnotify_work);
	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}

static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
{
	struct trace_array *tr = container_of(iwork, struct trace_array,
					      fsnotify_irqwork);
	queue_work(fsnotify_wq, &tr->fsnotify_work);
}

static void trace_create_maxlat_file(struct trace_array *tr,
				     struct dentry *d_tracer)
{
	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
	tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
					      d_tracer, &tr->max_latency,
					      &tracing_max_lat_fops);
}

__init static int latency_fsnotify_init(void)
{
	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
				      WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!fsnotify_wq) {
		pr_err("Unable to allocate tr_max_lat_wq\n");
		return -ENOMEM;
	}
	return 0;
}

late_initcall_sync(latency_fsnotify_init);

void latency_fsnotify(struct trace_array *tr)
{
	if (!fsnotify_wq)
		return;
	/*
	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
	 * possible that we are called from __schedule() or do_idle(), which
	 * could cause a deadlock.
	 */
	irq_work_queue(&tr->fsnotify_irqwork);
}

/*
 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
 *  defined(CONFIG_FSNOTIFY)
 */
#else

#define trace_create_maxlat_file(tr, d_tracer)				\
	trace_create_file("tracing_max_latency", 0644, d_tracer,	\
			  &tr->max_latency, &tracing_max_lat_fops)

#endif
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct array_buffer *trace_buf = &tr->array_buffer;
	struct array_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
	latency_fsnotify(tr);
}
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
	      void *cond_data)
{
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from array_buffer */
	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

#ifdef CONFIG_TRACER_SNAPSHOT
	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
		goto out_unlock;
#endif
	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);

	__update_max_tr(tr, tsk, cpu);

 out_unlock:
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter, int full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head		list;
	struct tracer			*type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
	 * initialized and such), then do not run its selftests yet.
	 * Instead, run it a little later in the boot process.
	 */
	if (!selftests_can_run)
		return save_selftest(type);

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->array_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
static __init int init_trace_selftests(void)
{
	struct trace_selftests *p, *n;
	struct tracer *t, **last;
	int ret;

	selftests_can_run = true;

	mutex_lock(&trace_types_lock);

	if (list_empty(&postponed_selftests))
		goto out;

	pr_info("Running postponed tracer tests:\n");

	tracing_selftest_running = true;
	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		/* This loop can take minutes when sanitizers are enabled, so
		 * lets make sure we allow RCU processing.
		 */
		cond_resched();
		ret = run_tracer_selftest(p->type);
		/* If the test fails, then warn and remove from available_tracers */
		if (ret < 0) {
			WARN(1, "tracer: %s failed selftest, disabling\n",
			     p->type->name);
			last = &trace_types;
			for (t = trace_types; t; t = t->next) {
				if (t == p->type) {
					*last = t->next;
					break;
				}
				last = &t->next;
			}
		}
		list_del(&p->list);
		kfree(p);
	}
	tracing_selftest_running = false;

 out:
	mutex_unlock(&trace_types_lock);

	return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Can not register tracer %s due to lockdown\n",
			type->name);
		return -EPERM;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/*allocate a dummy tracer_flags*/
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	disable_tracing_selftest("running a tracer");

 out_unlock:
	return ret;
}
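
/*
 * Illustrative sketch (not part of this file): the smallest useful
 * tracer only needs a name plus init/reset callbacks. "noop_example"
 * and its callbacks are invented for the example; the real users are
 * the tracers under kernel/trace/ (e.g. trace_nop.c).
 *
 *	static int example_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void example_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "noop_example",
 *		.init	= example_init,
 *		.reset	= example_reset,
 *	};
 *
 *	static __init int init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(init_example_tracer);
 */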
static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
{
	struct trace_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct array_buffer *buf)
{
	struct trace_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	ring_buffer_reset_online_cpus(buffer);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->clear_trace)
			continue;
		tr->clear_trace = false;
		tracing_reset_online_cpus(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

/*
 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
 * is the tgid last observed corresponding to pid=i.
 */
static int *tgid_map;

/* The maximum valid index into tgid_map. */
static size_t tgid_map_max;
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc_array(val,
					      sizeof(*s->map_cmdline_to_pid),
					      GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}
int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.array_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
static void tracing_start_tr(struct trace_array *tr)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->array_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.array_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->array_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
2392 static int trace_save_cmdline(struct task_struct
*tsk
)
2396 /* treat recording of idle task as a success */
2400 tpid
= tsk
->pid
& (PID_MAX_DEFAULT
- 1);
2403 * It's not the end of the world if we don't get
2404 * the lock, but we also don't want to spin
2405 * nor do we want to disable interrupts,
2406 * so if we miss here, then better luck next time.
2408 if (!arch_spin_trylock(&trace_cmdline_lock
))
2411 idx
= savedcmd
->map_pid_to_cmdline
[tpid
];
2412 if (idx
== NO_CMDLINE_MAP
) {
2413 idx
= (savedcmd
->cmdline_idx
+ 1) % savedcmd
->cmdline_num
;
2415 savedcmd
->map_pid_to_cmdline
[tpid
] = idx
;
2416 savedcmd
->cmdline_idx
= idx
;
2419 savedcmd
->map_cmdline_to_pid
[idx
] = tsk
->pid
;
2420 set_cmdline(idx
, tsk
->comm
);
2422 arch_spin_unlock(&trace_cmdline_lock
);
2427 static void __trace_find_cmdline(int pid
, char comm
[])
2433 strcpy(comm
, "<idle>");
2437 if (WARN_ON_ONCE(pid
< 0)) {
2438 strcpy(comm
, "<XXX>");
2442 tpid
= pid
& (PID_MAX_DEFAULT
- 1);
2443 map
= savedcmd
->map_pid_to_cmdline
[tpid
];
2444 if (map
!= NO_CMDLINE_MAP
) {
2445 tpid
= savedcmd
->map_cmdline_to_pid
[map
];
2447 strlcpy(comm
, get_saved_cmdlines(map
), TASK_COMM_LEN
);
2451 strcpy(comm
, "<...>");
2454 void trace_find_cmdline(int pid
, char comm
[])
2457 arch_spin_lock(&trace_cmdline_lock
);
2459 __trace_find_cmdline(pid
, comm
);
2461 arch_spin_unlock(&trace_cmdline_lock
);
static int *trace_find_tgid_ptr(int pid)
{
	/*
	 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
	 * if we observe a non-NULL tgid_map then we also observe the correct
	 * tgid_map_max.
	 */
	int *map = smp_load_acquire(&tgid_map);

	if (unlikely(!map || pid > tgid_map_max))
		return NULL;

	return &map[pid];
}

int trace_find_tgid(int pid)
{
	int *ptr = trace_find_tgid_ptr(pid);

	return ptr ? *ptr : 0;
}
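/*
 * Illustrative sketch (not part of this file): a caller that wants to print
 * "pid (tgid)" for an event can use trace_find_tgid(); a return of 0 simply
 * means the tgid was never recorded, since the map is best effort and not a
 * full pid map.
 *
 *	void sketch_print_task(struct trace_seq *s, int pid)
 *	{
 *		int tgid = trace_find_tgid(pid);
 *
 *		if (tgid)
 *			trace_seq_printf(s, "%d (%d)", pid, tgid);
 *		else
 *			trace_seq_printf(s, "%d (---)", pid);
 *	}
 */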
2487 static int trace_save_tgid(struct task_struct
*tsk
)
2491 /* treat recording of idle task as a success */
2495 ptr
= trace_find_tgid_ptr(tsk
->pid
);
2503 static bool tracing_record_taskinfo_skip(int flags
)
2505 if (unlikely(!(flags
& (TRACE_RECORD_CMDLINE
| TRACE_RECORD_TGID
))))
2507 if (!__this_cpu_read(trace_taskinfo_save
))
2513 * tracing_record_taskinfo - record the task info of a task
2515 * @task: task to record
2516 * @flags: TRACE_RECORD_CMDLINE for recording comm
2517 * TRACE_RECORD_TGID for recording tgid
2519 void tracing_record_taskinfo(struct task_struct
*task
, int flags
)
2523 if (tracing_record_taskinfo_skip(flags
))
2527 * Record as much task information as possible. If some fail, continue
2528 * to try to record the others.
2530 done
= !(flags
& TRACE_RECORD_CMDLINE
) || trace_save_cmdline(task
);
2531 done
&= !(flags
& TRACE_RECORD_TGID
) || trace_save_tgid(task
);
2533 /* If recording any information failed, retry again soon. */
2537 __this_cpu_write(trace_taskinfo_save
, false);
2541 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2543 * @prev: previous task during sched_switch
2544 * @next: next task during sched_switch
2545 * @flags: TRACE_RECORD_CMDLINE for recording comm
2546 * TRACE_RECORD_TGID for recording tgid
2548 void tracing_record_taskinfo_sched_switch(struct task_struct
*prev
,
2549 struct task_struct
*next
, int flags
)
2553 if (tracing_record_taskinfo_skip(flags
))
2557 * Record as much task information as possible. If some fail, continue
2558 * to try to record the others.
2560 done
= !(flags
& TRACE_RECORD_CMDLINE
) || trace_save_cmdline(prev
);
2561 done
&= !(flags
& TRACE_RECORD_CMDLINE
) || trace_save_cmdline(next
);
2562 done
&= !(flags
& TRACE_RECORD_TGID
) || trace_save_tgid(prev
);
2563 done
&= !(flags
& TRACE_RECORD_TGID
) || trace_save_tgid(next
);
2565 /* If recording any information failed, retry again soon. */
2569 __this_cpu_write(trace_taskinfo_save
, false);
/* Helpers to record a specific task information */
void tracing_record_cmdline(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
}

void tracing_record_tgid(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_TGID);
}
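/*
 * Illustrative sketch (not part of this file): a sched_switch probe that
 * wants both comm and tgid recorded passes both flags, e.g.:
 *
 *	tracing_record_taskinfo_sched_switch(prev, next,
 *					     TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID);
 *
 * The flags may be OR'd freely; recording is skipped entirely unless the
 * per-cpu trace_taskinfo_save flag was set by an earlier trace event.
 */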
/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 * simplifies those functions and keeps them in sync.
 */
enum print_line_t trace_handle_return(struct trace_seq *s)
{
	return trace_seq_has_overflowed(s) ?
		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
EXPORT_SYMBOL_GPL(trace_handle_return);
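/*
 * Illustrative sketch (not part of this file): a typical output callback
 * writes into the trace_seq and lets trace_handle_return() translate a
 * possible overflow into TRACE_TYPE_PARTIAL_LINE. The event below is made up.
 *
 *	static enum print_line_t sketch_trace_output(struct trace_iterator *iter,
 *						     int flags, struct trace_event *event)
 *	{
 *		struct trace_seq *s = &iter->seq;
 *
 *		trace_seq_printf(s, "sketch event on cpu %d\n", iter->cpu);
 *		return trace_handle_return(s);
 *	}
 */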
2596 tracing_generic_entry_update(struct trace_entry
*entry
, unsigned short type
,
2597 unsigned long flags
, int pc
)
2599 struct task_struct
*tsk
= current
;
2601 entry
->preempt_count
= pc
& 0xff;
2602 entry
->pid
= (tsk
) ? tsk
->pid
: 0;
2605 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2606 (irqs_disabled_flags(flags
) ? TRACE_FLAG_IRQS_OFF
: 0) |
2608 TRACE_FLAG_IRQS_NOSUPPORT
|
2610 ((pc
& NMI_MASK
) ? TRACE_FLAG_NMI
: 0) |
2611 ((pc
& HARDIRQ_MASK
) ? TRACE_FLAG_HARDIRQ
: 0) |
2612 ((pc
& SOFTIRQ_OFFSET
) ? TRACE_FLAG_SOFTIRQ
: 0) |
2613 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED
: 0) |
2614 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED
: 0);
2616 EXPORT_SYMBOL_GPL(tracing_generic_entry_update
);
2618 struct ring_buffer_event
*
2619 trace_buffer_lock_reserve(struct trace_buffer
*buffer
,
2622 unsigned long flags
, int pc
)
2624 return __trace_buffer_lock_reserve(buffer
, type
, len
, flags
, pc
);
2627 DEFINE_PER_CPU(struct ring_buffer_event
*, trace_buffered_event
);
2628 DEFINE_PER_CPU(int, trace_buffered_event_cnt
);
2629 static int trace_buffered_event_ref
;
2632 * trace_buffered_event_enable - enable buffering events
2634 * When events are being filtered, it is quicker to use a temporary
2635 * buffer to write the event data into if there's a likely chance
2636 * that it will not be committed. The discard of the ring buffer
2637 * is not as fast as committing, and is much slower than copying
2640 * When an event is to be filtered, allocate per cpu buffers to
2641 * write the event data into, and if the event is filtered and discarded
2642 * it is simply dropped, otherwise, the entire data is to be committed
2645 void trace_buffered_event_enable(void)
2647 struct ring_buffer_event
*event
;
2651 WARN_ON_ONCE(!mutex_is_locked(&event_mutex
));
2653 if (trace_buffered_event_ref
++)
2656 for_each_tracing_cpu(cpu
) {
2657 page
= alloc_pages_node(cpu_to_node(cpu
),
2658 GFP_KERNEL
| __GFP_NORETRY
, 0);
2662 event
= page_address(page
);
2663 memset(event
, 0, sizeof(*event
));
2665 per_cpu(trace_buffered_event
, cpu
) = event
;
2668 if (cpu
== smp_processor_id() &&
2669 __this_cpu_read(trace_buffered_event
) !=
2670 per_cpu(trace_buffered_event
, cpu
))
2677 trace_buffered_event_disable();
2680 static void enable_trace_buffered_event(void *data
)
2682 /* Probably not needed, but do it anyway */
2684 this_cpu_dec(trace_buffered_event_cnt
);
2687 static void disable_trace_buffered_event(void *data
)
2689 this_cpu_inc(trace_buffered_event_cnt
);
2693 * trace_buffered_event_disable - disable buffering events
2695 * When a filter is removed, it is faster to not use the buffered
2696 * events, and to commit directly into the ring buffer. Free up
2697 * the temp buffers when there are no more users. This requires
2698 * special synchronization with current events.
2700 void trace_buffered_event_disable(void)
2704 WARN_ON_ONCE(!mutex_is_locked(&event_mutex
));
2706 if (WARN_ON_ONCE(!trace_buffered_event_ref
))
2709 if (--trace_buffered_event_ref
)
2713 /* For each CPU, set the buffer as used. */
2714 smp_call_function_many(tracing_buffer_mask
,
2715 disable_trace_buffered_event
, NULL
, 1);
2718 /* Wait for all current users to finish */
2721 for_each_tracing_cpu(cpu
) {
2722 free_page((unsigned long)per_cpu(trace_buffered_event
, cpu
));
2723 per_cpu(trace_buffered_event
, cpu
) = NULL
;
2726 * Make sure trace_buffered_event is NULL before clearing
2727 * trace_buffered_event_cnt.
2732 /* Do the work on each cpu */
2733 smp_call_function_many(tracing_buffer_mask
,
2734 enable_trace_buffered_event
, NULL
, 1);
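/*
 * Illustrative sketch (not part of this file): a caller that starts filtering
 * events takes a reference while holding event_mutex, and drops it again when
 * the filter goes away:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();		// filter installed
 *	mutex_unlock(&event_mutex);
 *	...
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_disable();		// filter removed
 *	mutex_unlock(&event_mutex);
 *
 * The calls are reference counted, so multiple filtered events can share the
 * same per-cpu pages.
 */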
2738 static struct trace_buffer
*temp_buffer
;
2740 struct ring_buffer_event
*
2741 trace_event_buffer_lock_reserve(struct trace_buffer
**current_rb
,
2742 struct trace_event_file
*trace_file
,
2743 int type
, unsigned long len
,
2744 unsigned long flags
, int pc
)
2746 struct ring_buffer_event
*entry
;
2749 *current_rb
= trace_file
->tr
->array_buffer
.buffer
;
2751 if (!ring_buffer_time_stamp_abs(*current_rb
) && (trace_file
->flags
&
2752 (EVENT_FILE_FL_SOFT_DISABLED
| EVENT_FILE_FL_FILTERED
)) &&
2753 (entry
= this_cpu_read(trace_buffered_event
))) {
2754 /* Try to use the per cpu buffer first */
2755 val
= this_cpu_inc_return(trace_buffered_event_cnt
);
2756 if ((len
< (PAGE_SIZE
- sizeof(*entry
) - sizeof(entry
->array
[0]))) && val
== 1) {
2757 trace_event_setup(entry
, type
, flags
, pc
);
2758 entry
->array
[0] = len
;
2761 this_cpu_dec(trace_buffered_event_cnt
);
2764 entry
= __trace_buffer_lock_reserve(*current_rb
,
2765 type
, len
, flags
, pc
);
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It is recursion
	 * safe and will not be recorded anywhere.
2772 if (!entry
&& trace_file
->flags
& EVENT_FILE_FL_TRIGGER_COND
) {
2773 *current_rb
= temp_buffer
;
2774 entry
= __trace_buffer_lock_reserve(*current_rb
,
2775 type
, len
, flags
, pc
);
2779 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve
);
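/*
 * Illustrative sketch (not part of this file): event code reserves space,
 * fills in its entry, and then commits; whether the data landed in the
 * per-cpu buffered event or in the ring buffer proper is transparent to it.
 * The entry layout below is made up.
 *
 *	struct ring_buffer_event *ev;
 *	struct trace_buffer *rb;
 *
 *	ev = trace_event_buffer_lock_reserve(&rb, trace_file, type,
 *					     sizeof(struct sketch_entry), flags, pc);
 *	if (ev) {
 *		struct sketch_entry *entry = ring_buffer_event_data(ev);
 *
 *		entry->value = 42;
 *		... commit via the event trigger/commit helpers ...
 *	}
 */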
2781 static DEFINE_SPINLOCK(tracepoint_iter_lock
);
2782 static DEFINE_MUTEX(tracepoint_printk_mutex
);
2784 static void output_printk(struct trace_event_buffer
*fbuffer
)
2786 struct trace_event_call
*event_call
;
2787 struct trace_event_file
*file
;
2788 struct trace_event
*event
;
2789 unsigned long flags
;
2790 struct trace_iterator
*iter
= tracepoint_print_iter
;
2792 /* We should never get here if iter is NULL */
2793 if (WARN_ON_ONCE(!iter
))
2796 event_call
= fbuffer
->trace_file
->event_call
;
2797 if (!event_call
|| !event_call
->event
.funcs
||
2798 !event_call
->event
.funcs
->trace
)
2801 file
= fbuffer
->trace_file
;
2802 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT
, &file
->flags
) ||
2803 (unlikely(file
->flags
& EVENT_FILE_FL_FILTERED
) &&
2804 !filter_match_preds(file
->filter
, fbuffer
->entry
)))
2807 event
= &fbuffer
->trace_file
->event_call
->event
;
2809 spin_lock_irqsave(&tracepoint_iter_lock
, flags
);
2810 trace_seq_init(&iter
->seq
);
2811 iter
->ent
= fbuffer
->entry
;
2812 event_call
->event
.funcs
->trace(iter
, 0, event
);
2813 trace_seq_putc(&iter
->seq
, 0);
2814 printk("%s", iter
->seq
.buffer
);
2816 spin_unlock_irqrestore(&tracepoint_iter_lock
, flags
);
2819 int tracepoint_printk_sysctl(struct ctl_table
*table
, int write
,
2820 void *buffer
, size_t *lenp
,
2823 int save_tracepoint_printk
;
2826 mutex_lock(&tracepoint_printk_mutex
);
2827 save_tracepoint_printk
= tracepoint_printk
;
2829 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
2832 * This will force exiting early, as tracepoint_printk
2833 * is always zero when tracepoint_printk_iter is not allocated
2835 if (!tracepoint_print_iter
)
2836 tracepoint_printk
= 0;
2838 if (save_tracepoint_printk
== tracepoint_printk
)
2841 if (tracepoint_printk
)
2842 static_key_enable(&tracepoint_printk_key
.key
);
2844 static_key_disable(&tracepoint_printk_key
.key
);
2847 mutex_unlock(&tracepoint_printk_mutex
);
2852 void trace_event_buffer_commit(struct trace_event_buffer
*fbuffer
)
2854 if (static_key_false(&tracepoint_printk_key
.key
))
2855 output_printk(fbuffer
);
2857 if (static_branch_unlikely(&trace_event_exports_enabled
))
2858 ftrace_exports(fbuffer
->event
, TRACE_EXPORT_EVENT
);
2859 event_trigger_unlock_commit_regs(fbuffer
->trace_file
, fbuffer
->buffer
,
2860 fbuffer
->event
, fbuffer
->entry
,
2861 fbuffer
->flags
, fbuffer
->pc
, fbuffer
->regs
);
2863 EXPORT_SYMBOL_GPL(trace_event_buffer_commit
);
2868 * trace_buffer_unlock_commit_regs()
2869 * trace_event_buffer_commit()
2870 * trace_event_raw_event_xxx()
2872 # define STACK_SKIP 3
2874 void trace_buffer_unlock_commit_regs(struct trace_array
*tr
,
2875 struct trace_buffer
*buffer
,
2876 struct ring_buffer_event
*event
,
2877 unsigned long flags
, int pc
,
2878 struct pt_regs
*regs
)
2880 __buffer_unlock_commit(buffer
, event
);
2883 * If regs is not set, then skip the necessary functions.
2884 * Note, we can still get here via blktrace, wakeup tracer
2885 * and mmiotrace, but that's ok if they lose a function or
2886 * two. They are not that meaningful.
2888 ftrace_trace_stack(tr
, buffer
, flags
, regs
? 0 : STACK_SKIP
, pc
, regs
);
2889 ftrace_trace_userstack(tr
, buffer
, flags
, pc
);
2893 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2896 trace_buffer_unlock_commit_nostack(struct trace_buffer
*buffer
,
2897 struct ring_buffer_event
*event
)
2899 __buffer_unlock_commit(buffer
, event
);
2903 trace_function(struct trace_array
*tr
,
2904 unsigned long ip
, unsigned long parent_ip
, unsigned long flags
,
2907 struct trace_event_call
*call
= &event_function
;
2908 struct trace_buffer
*buffer
= tr
->array_buffer
.buffer
;
2909 struct ring_buffer_event
*event
;
2910 struct ftrace_entry
*entry
;
2912 event
= __trace_buffer_lock_reserve(buffer
, TRACE_FN
, sizeof(*entry
),
2916 entry
= ring_buffer_event_data(event
);
2918 entry
->parent_ip
= parent_ip
;
2920 if (!call_filter_check_discard(call
, entry
, buffer
, event
)) {
2921 if (static_branch_unlikely(&trace_function_exports_enabled
))
2922 ftrace_exports(event
, TRACE_EXPORT_FUNCTION
);
2923 __buffer_unlock_commit(buffer
, event
);
2927 #ifdef CONFIG_STACKTRACE
2929 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2930 #define FTRACE_KSTACK_NESTING 4
2932 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2934 struct ftrace_stack
{
2935 unsigned long calls
[FTRACE_KSTACK_ENTRIES
];
2939 struct ftrace_stacks
{
2940 struct ftrace_stack stacks
[FTRACE_KSTACK_NESTING
];
2943 static DEFINE_PER_CPU(struct ftrace_stacks
, ftrace_stacks
);
2944 static DEFINE_PER_CPU(int, ftrace_stack_reserve
);
2946 static void __ftrace_trace_stack(struct trace_buffer
*buffer
,
2947 unsigned long flags
,
2948 int skip
, int pc
, struct pt_regs
*regs
)
2950 struct trace_event_call
*call
= &event_kernel_stack
;
2951 struct ring_buffer_event
*event
;
2952 unsigned int size
, nr_entries
;
2953 struct ftrace_stack
*fstack
;
2954 struct stack_entry
*entry
;
2958 * Add one, for this function and the call to save_stack_trace()
2959 * If regs is set, then these functions will not be in the way.
2961 #ifndef CONFIG_UNWINDER_ORC
2966 preempt_disable_notrace();
2968 stackidx
= __this_cpu_inc_return(ftrace_stack_reserve
) - 1;
2970 /* This should never happen. If it does, yell once and skip */
2971 if (WARN_ON_ONCE(stackidx
>= FTRACE_KSTACK_NESTING
))
2975 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2976 * interrupt will either see the value pre increment or post
2977 * increment. If the interrupt happens pre increment it will have
2978 * restored the counter when it returns. We just need a barrier to
2979 * keep gcc from moving things around.
2983 fstack
= this_cpu_ptr(ftrace_stacks
.stacks
) + stackidx
;
2984 size
= ARRAY_SIZE(fstack
->calls
);
2987 nr_entries
= stack_trace_save_regs(regs
, fstack
->calls
,
2990 nr_entries
= stack_trace_save(fstack
->calls
, size
, skip
);
2993 size
= nr_entries
* sizeof(unsigned long);
2994 event
= __trace_buffer_lock_reserve(buffer
, TRACE_STACK
,
2995 (sizeof(*entry
) - sizeof(entry
->caller
)) + size
,
2999 entry
= ring_buffer_event_data(event
);
3001 memcpy(&entry
->caller
, fstack
->calls
, size
);
3002 entry
->size
= nr_entries
;
3004 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
3005 __buffer_unlock_commit(buffer
, event
);
3008 /* Again, don't let gcc optimize things here */
3010 __this_cpu_dec(ftrace_stack_reserve
);
3011 preempt_enable_notrace();
3015 static inline void ftrace_trace_stack(struct trace_array
*tr
,
3016 struct trace_buffer
*buffer
,
3017 unsigned long flags
,
3018 int skip
, int pc
, struct pt_regs
*regs
)
3020 if (!(tr
->trace_flags
& TRACE_ITER_STACKTRACE
))
3023 __ftrace_trace_stack(buffer
, flags
, skip
, pc
, regs
);
3026 void __trace_stack(struct trace_array
*tr
, unsigned long flags
, int skip
,
3029 struct trace_buffer
*buffer
= tr
->array_buffer
.buffer
;
3031 if (rcu_is_watching()) {
3032 __ftrace_trace_stack(buffer
, flags
, skip
, pc
, NULL
);
3037 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3038 * but if the above rcu_is_watching() failed, then the NMI
3039 * triggered someplace critical, and rcu_irq_enter() should
3040 * not be called from NMI.
3042 if (unlikely(in_nmi()))
3045 rcu_irq_enter_irqson();
3046 __ftrace_trace_stack(buffer
, flags
, skip
, pc
, NULL
);
3047 rcu_irq_exit_irqson();
3051 * trace_dump_stack - record a stack back trace in the trace buffer
3052 * @skip: Number of functions to skip (helper handlers)
3054 void trace_dump_stack(int skip
)
3056 unsigned long flags
;
3058 if (tracing_disabled
|| tracing_selftest_running
)
3061 local_save_flags(flags
);
3063 #ifndef CONFIG_UNWINDER_ORC
3064 /* Skip 1 to skip this function. */
3067 __ftrace_trace_stack(global_trace
.array_buffer
.buffer
,
3068 flags
, skip
, preempt_count(), NULL
);
3070 EXPORT_SYMBOL_GPL(trace_dump_stack
);
3072 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3073 static DEFINE_PER_CPU(int, user_stack_count
);
3076 ftrace_trace_userstack(struct trace_array
*tr
,
3077 struct trace_buffer
*buffer
, unsigned long flags
, int pc
)
3079 struct trace_event_call
*call
= &event_user_stack
;
3080 struct ring_buffer_event
*event
;
3081 struct userstack_entry
*entry
;
3083 if (!(tr
->trace_flags
& TRACE_ITER_USERSTACKTRACE
))
3087 * NMIs can not handle page faults, even with fix ups.
3088 * The save user stack can (and often does) fault.
3090 if (unlikely(in_nmi()))
3094 * prevent recursion, since the user stack tracing may
3095 * trigger other kernel events.
3098 if (__this_cpu_read(user_stack_count
))
3101 __this_cpu_inc(user_stack_count
);
3103 event
= __trace_buffer_lock_reserve(buffer
, TRACE_USER_STACK
,
3104 sizeof(*entry
), flags
, pc
);
3106 goto out_drop_count
;
3107 entry
= ring_buffer_event_data(event
);
3109 entry
->tgid
= current
->tgid
;
3110 memset(&entry
->caller
, 0, sizeof(entry
->caller
));
3112 stack_trace_save_user(entry
->caller
, FTRACE_STACK_ENTRIES
);
3113 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
3114 __buffer_unlock_commit(buffer
, event
);
3117 __this_cpu_dec(user_stack_count
);
3121 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3122 static void ftrace_trace_userstack(struct trace_array
*tr
,
3123 struct trace_buffer
*buffer
,
3124 unsigned long flags
, int pc
)
3127 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3129 #endif /* CONFIG_STACKTRACE */
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	int nesting;
	char buffer[4][TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;

/*
 * This allows for lockless recording. If we're nested too deeply, then
 * this returns NULL.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);

	if (!buffer || buffer->nesting >= 4)
		return NULL;

	buffer->nesting++;

	/* Interrupts must see nesting incremented before we use the buffer */
	barrier();
	return &buffer->buffer[buffer->nesting - 1][0];
}

static void put_trace_buf(void)
{
	/* Don't let the decrement of nesting leak before this */
	barrier();
	this_cpu_dec(trace_percpu_buffer->nesting);
}
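/*
 * Illustrative sketch (not part of this file): every successful call to
 * get_trace_buf() must be paired with put_trace_buf() on the same CPU, with
 * preemption disabled across the pair, as trace_vbprintk() below does:
 *
 *	preempt_disable_notrace();
 *	buf = get_trace_buf();
 *	if (buf) {
 *		... format the message into buf ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 *
 * Up to four levels of nesting (task, softirq, irq, NMI) each get their own
 * slot; a fifth level simply sees NULL and drops the message.
 */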
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;

	if (trace_percpu_buffer)
		return 0;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
		return -ENOMEM;

	trace_percpu_buffer = buffers;
	return 0;
}
3179 static int buffers_allocated
;
3181 void trace_printk_init_buffers(void)
3183 if (buffers_allocated
)
3186 if (alloc_percpu_trace_buffer())
3189 /* trace_printk() is for debug use only. Don't use it in production. */
3192 pr_warn("**********************************************************\n");
3193 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3195 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3197 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3198 pr_warn("** unsafe for production use. **\n");
3200 pr_warn("** If you see this message and you are not debugging **\n");
3201 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3203 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3204 pr_warn("**********************************************************\n");
3206 /* Expand the buffers to set size */
3207 tracing_update_buffers();
3209 buffers_allocated
= 1;
3212 * trace_printk_init_buffers() can be called by modules.
3213 * If that happens, then we need to start cmdline recording
3214 * directly here. If the global_trace.buffer is already
3215 * allocated here, then this was called by module code.
3217 if (global_trace
.array_buffer
.buffer
)
3218 tracing_start_cmdline_record();
3220 EXPORT_SYMBOL_GPL(trace_printk_init_buffers
);
3222 void trace_printk_start_comm(void)
3224 /* Start tracing comms if trace printk is set */
3225 if (!buffers_allocated
)
3227 tracing_start_cmdline_record();
3230 static void trace_printk_start_stop_comm(int enabled
)
3232 if (!buffers_allocated
)
3236 tracing_start_cmdline_record();
3238 tracing_stop_cmdline_record();
3242 * trace_vbprintk - write binary msg to tracing buffer
3243 * @ip: The address of the caller
3244 * @fmt: The string format to write to the buffer
3245 * @args: Arguments for @fmt
3247 int trace_vbprintk(unsigned long ip
, const char *fmt
, va_list args
)
3249 struct trace_event_call
*call
= &event_bprint
;
3250 struct ring_buffer_event
*event
;
3251 struct trace_buffer
*buffer
;
3252 struct trace_array
*tr
= &global_trace
;
3253 struct bprint_entry
*entry
;
3254 unsigned long flags
;
3256 int len
= 0, size
, pc
;
3258 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
3261 /* Don't pollute graph traces with trace_vprintk internals */
3262 pause_graph_tracing();
3264 pc
= preempt_count();
3265 preempt_disable_notrace();
3267 tbuffer
= get_trace_buf();
3273 len
= vbin_printf((u32
*)tbuffer
, TRACE_BUF_SIZE
/sizeof(int), fmt
, args
);
3275 if (len
> TRACE_BUF_SIZE
/sizeof(int) || len
< 0)
3278 local_save_flags(flags
);
3279 size
= sizeof(*entry
) + sizeof(u32
) * len
;
3280 buffer
= tr
->array_buffer
.buffer
;
3281 ring_buffer_nest_start(buffer
);
3282 event
= __trace_buffer_lock_reserve(buffer
, TRACE_BPRINT
, size
,
3286 entry
= ring_buffer_event_data(event
);
3290 memcpy(entry
->buf
, tbuffer
, sizeof(u32
) * len
);
3291 if (!call_filter_check_discard(call
, entry
, buffer
, event
)) {
3292 __buffer_unlock_commit(buffer
, event
);
3293 ftrace_trace_stack(tr
, buffer
, flags
, 6, pc
, NULL
);
3297 ring_buffer_nest_end(buffer
);
3302 preempt_enable_notrace();
3303 unpause_graph_tracing();
3307 EXPORT_SYMBOL_GPL(trace_vbprintk
);
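/*
 * Illustrative sketch (not part of this file): trace_vbprintk() is the
 * va_list backend; a printf-style wrapper simply forwards to it, e.g.:
 *
 *	static int sketch_bprintk(unsigned long ip, const char *fmt, ...)
 *	{
 *		va_list ap;
 *		int ret;
 *
 *		va_start(ap, fmt);
 *		ret = trace_vbprintk(ip, fmt, ap);
 *		va_end(ap);
 *		return ret;
 *	}
 */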
3311 __trace_array_vprintk(struct trace_buffer
*buffer
,
3312 unsigned long ip
, const char *fmt
, va_list args
)
3314 struct trace_event_call
*call
= &event_print
;
3315 struct ring_buffer_event
*event
;
3316 int len
= 0, size
, pc
;
3317 struct print_entry
*entry
;
3318 unsigned long flags
;
3321 if (tracing_disabled
|| tracing_selftest_running
)
3324 /* Don't pollute graph traces with trace_vprintk internals */
3325 pause_graph_tracing();
3327 pc
= preempt_count();
3328 preempt_disable_notrace();
3331 tbuffer
= get_trace_buf();
3337 len
= vscnprintf(tbuffer
, TRACE_BUF_SIZE
, fmt
, args
);
3339 local_save_flags(flags
);
3340 size
= sizeof(*entry
) + len
+ 1;
3341 ring_buffer_nest_start(buffer
);
3342 event
= __trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, size
,
3346 entry
= ring_buffer_event_data(event
);
3349 memcpy(&entry
->buf
, tbuffer
, len
+ 1);
3350 if (!call_filter_check_discard(call
, entry
, buffer
, event
)) {
3351 __buffer_unlock_commit(buffer
, event
);
3352 ftrace_trace_stack(&global_trace
, buffer
, flags
, 6, pc
, NULL
);
3356 ring_buffer_nest_end(buffer
);
3360 preempt_enable_notrace();
3361 unpause_graph_tracing();
int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
}
3374 * trace_array_printk - Print a message to a specific instance
3375 * @tr: The instance trace_array descriptor
3376 * @ip: The instruction pointer that this is called from.
3377 * @fmt: The format to print (printf format)
3379 * If a subsystem sets up its own instance, they have the right to
3380 * printk strings into their tracing instance buffer using this
3381 * function. Note, this function will not write into the top level
3382 * buffer (use trace_printk() for that), as writing into the top level
3383 * buffer should only have events that can be individually disabled.
 * trace_printk() is only used for debugging a kernel, and should never
 * be incorporated in normal use.
3387 * trace_array_printk() can be used, as it will not add noise to the
3388 * top level tracing buffer.
3390 * Note, trace_array_init_printk() must be called on @tr before this
3394 int trace_array_printk(struct trace_array
*tr
,
3395 unsigned long ip
, const char *fmt
, ...)
3403 /* This is only allowed for created instances */
3404 if (tr
== &global_trace
)
3407 if (!(tr
->trace_flags
& TRACE_ITER_PRINTK
))
3411 ret
= trace_array_vprintk(tr
, ip
, fmt
, ap
);
3415 EXPORT_SYMBOL_GPL(trace_array_printk
);
3418 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3419 * @tr: The trace array to initialize the buffers for
3421 * As trace_array_printk() only writes into instances, they are OK to
3422 * have in the kernel (unlike trace_printk()). This needs to be called
3423 * before trace_array_printk() can be used on a trace_array.
3425 int trace_array_init_printk(struct trace_array
*tr
)
3430 /* This is only allowed for created instances */
3431 if (tr
== &global_trace
)
3434 return alloc_percpu_trace_buffer();
3436 EXPORT_SYMBOL_GPL(trace_array_init_printk
);
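/*
 * Illustrative sketch (not part of this file): a module that owns its own
 * instance initializes the printk buffers once and then logs into that
 * instance only. The instance name below is made up.
 *
 *	struct trace_array *tr = trace_array_get_by_name("sketch");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "probe fired: %d\n", value);
 */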
3439 int trace_array_printk_buf(struct trace_buffer
*buffer
,
3440 unsigned long ip
, const char *fmt
, ...)
3445 if (!(global_trace
.trace_flags
& TRACE_ITER_PRINTK
))
3449 ret
= __trace_array_vprintk(buffer
, ip
, fmt
, ap
);
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
3461 static void trace_iterator_increment(struct trace_iterator
*iter
)
3463 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, iter
->cpu
);
3467 ring_buffer_iter_advance(buf_iter
);
3470 static struct trace_entry
*
3471 peek_next_entry(struct trace_iterator
*iter
, int cpu
, u64
*ts
,
3472 unsigned long *lost_events
)
3474 struct ring_buffer_event
*event
;
3475 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, cpu
);
3478 event
= ring_buffer_iter_peek(buf_iter
, ts
);
3480 *lost_events
= ring_buffer_iter_dropped(buf_iter
) ?
3481 (unsigned long)-1 : 0;
3483 event
= ring_buffer_peek(iter
->array_buffer
->buffer
, cpu
, ts
,
3488 iter
->ent_size
= ring_buffer_event_length(event
);
3489 return ring_buffer_event_data(event
);
3495 static struct trace_entry
*
3496 __find_next_entry(struct trace_iterator
*iter
, int *ent_cpu
,
3497 unsigned long *missing_events
, u64
*ent_ts
)
3499 struct trace_buffer
*buffer
= iter
->array_buffer
->buffer
;
3500 struct trace_entry
*ent
, *next
= NULL
;
3501 unsigned long lost_events
= 0, next_lost
= 0;
3502 int cpu_file
= iter
->cpu_file
;
3503 u64 next_ts
= 0, ts
;
3509 * If we are in a per_cpu trace file, don't bother by iterating over
3510 * all cpu and peek directly.
3512 if (cpu_file
> RING_BUFFER_ALL_CPUS
) {
3513 if (ring_buffer_empty_cpu(buffer
, cpu_file
))
3515 ent
= peek_next_entry(iter
, cpu_file
, ent_ts
, missing_events
);
3517 *ent_cpu
= cpu_file
;
3522 for_each_tracing_cpu(cpu
) {
3524 if (ring_buffer_empty_cpu(buffer
, cpu
))
3527 ent
= peek_next_entry(iter
, cpu
, &ts
, &lost_events
);
3530 * Pick the entry with the smallest timestamp:
3532 if (ent
&& (!next
|| ts
< next_ts
)) {
3536 next_lost
= lost_events
;
3537 next_size
= iter
->ent_size
;
3541 iter
->ent_size
= next_size
;
3544 *ent_cpu
= next_cpu
;
3550 *missing_events
= next_lost
;
3555 #define STATIC_TEMP_BUF_SIZE 128
3556 static char static_temp_buf
[STATIC_TEMP_BUF_SIZE
] __aligned(4);
3558 /* Find the next real entry, without updating the iterator itself */
3559 struct trace_entry
*trace_find_next_entry(struct trace_iterator
*iter
,
3560 int *ent_cpu
, u64
*ent_ts
)
3562 /* __find_next_entry will reset ent_size */
3563 int ent_size
= iter
->ent_size
;
3564 struct trace_entry
*entry
;
3567 * If called from ftrace_dump(), then the iter->temp buffer
3568 * will be the static_temp_buf and not created from kmalloc.
3569 * If the entry size is greater than the buffer, we can
3570 * not save it. Just return NULL in that case. This is only
3571 * used to add markers when two consecutive events' time
3572 * stamps have a large delta. See trace_print_lat_context()
3574 if (iter
->temp
== static_temp_buf
&&
3575 STATIC_TEMP_BUF_SIZE
< ent_size
)
3579 * The __find_next_entry() may call peek_next_entry(), which may
3580 * call ring_buffer_peek() that may make the contents of iter->ent
3581 * undefined. Need to copy iter->ent now.
3583 if (iter
->ent
&& iter
->ent
!= iter
->temp
) {
3584 if ((!iter
->temp
|| iter
->temp_size
< iter
->ent_size
) &&
3585 !WARN_ON_ONCE(iter
->temp
== static_temp_buf
)) {
3587 temp
= kmalloc(iter
->ent_size
, GFP_KERNEL
);
3592 iter
->temp_size
= iter
->ent_size
;
3594 memcpy(iter
->temp
, iter
->ent
, iter
->ent_size
);
3595 iter
->ent
= iter
->temp
;
3597 entry
= __find_next_entry(iter
, ent_cpu
, NULL
, ent_ts
);
3598 /* Put back the original ent_size */
3599 iter
->ent_size
= ent_size
;
3604 /* Find the next real entry, and increment the iterator to the next entry */
3605 void *trace_find_next_entry_inc(struct trace_iterator
*iter
)
3607 iter
->ent
= __find_next_entry(iter
, &iter
->cpu
,
3608 &iter
->lost_events
, &iter
->ts
);
3611 trace_iterator_increment(iter
);
3613 return iter
->ent
? iter
: NULL
;
3616 static void trace_consume(struct trace_iterator
*iter
)
3618 ring_buffer_consume(iter
->array_buffer
->buffer
, iter
->cpu
, &iter
->ts
,
3619 &iter
->lost_events
);
3622 static void *s_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
3624 struct trace_iterator
*iter
= m
->private;
3628 WARN_ON_ONCE(iter
->leftover
);
3632 /* can't go backwards */
3637 ent
= trace_find_next_entry_inc(iter
);
3641 while (ent
&& iter
->idx
< i
)
3642 ent
= trace_find_next_entry_inc(iter
);
3649 void tracing_iter_reset(struct trace_iterator
*iter
, int cpu
)
3651 struct ring_buffer_iter
*buf_iter
;
3652 unsigned long entries
= 0;
3655 per_cpu_ptr(iter
->array_buffer
->data
, cpu
)->skipped_entries
= 0;
3657 buf_iter
= trace_buffer_iter(iter
, cpu
);
3661 ring_buffer_iter_reset(buf_iter
);
3664 * We could have the case with the max latency tracers
3665 * that a reset never took place on a cpu. This is evident
3666 * by the timestamp being before the start of the buffer.
3668 while (ring_buffer_iter_peek(buf_iter
, &ts
)) {
3669 if (ts
>= iter
->array_buffer
->time_start
)
3672 ring_buffer_iter_advance(buf_iter
);
3675 per_cpu_ptr(iter
->array_buffer
->data
, cpu
)->skipped_entries
= entries
;
3679 * The current tracer is copied to avoid a global locking
3682 static void *s_start(struct seq_file
*m
, loff_t
*pos
)
3684 struct trace_iterator
*iter
= m
->private;
3685 struct trace_array
*tr
= iter
->tr
;
3686 int cpu_file
= iter
->cpu_file
;
3692 * copy the tracer to avoid using a global lock all around.
3693 * iter->trace is a copy of current_trace, the pointer to the
3694 * name may be used instead of a strcmp(), as iter->trace->name
3695 * will point to the same string as current_trace->name.
3697 mutex_lock(&trace_types_lock
);
3698 if (unlikely(tr
->current_trace
&& iter
->trace
->name
!= tr
->current_trace
->name
))
3699 *iter
->trace
= *tr
->current_trace
;
3700 mutex_unlock(&trace_types_lock
);
3702 #ifdef CONFIG_TRACER_MAX_TRACE
3703 if (iter
->snapshot
&& iter
->trace
->use_max_tr
)
3704 return ERR_PTR(-EBUSY
);
3707 if (*pos
!= iter
->pos
) {
3712 if (cpu_file
== RING_BUFFER_ALL_CPUS
) {
3713 for_each_tracing_cpu(cpu
)
3714 tracing_iter_reset(iter
, cpu
);
3716 tracing_iter_reset(iter
, cpu_file
);
3719 for (p
= iter
; p
&& l
< *pos
; p
= s_next(m
, p
, &l
))
3724 * If we overflowed the seq_file before, then we want
3725 * to just reuse the trace_seq buffer again.
3731 p
= s_next(m
, p
, &l
);
3735 trace_event_read_lock();
3736 trace_access_lock(cpu_file
);
3740 static void s_stop(struct seq_file
*m
, void *p
)
3742 struct trace_iterator
*iter
= m
->private;
3744 #ifdef CONFIG_TRACER_MAX_TRACE
3745 if (iter
->snapshot
&& iter
->trace
->use_max_tr
)
3749 trace_access_unlock(iter
->cpu_file
);
3750 trace_event_read_unlock();
3754 get_total_entries_cpu(struct array_buffer
*buf
, unsigned long *total
,
3755 unsigned long *entries
, int cpu
)
3757 unsigned long count
;
3759 count
= ring_buffer_entries_cpu(buf
->buffer
, cpu
);
3761 * If this buffer has skipped entries, then we hold all
3762 * entries for the trace and we need to ignore the
3763 * ones before the time stamp.
3765 if (per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
) {
3766 count
-= per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
;
3767 /* total is the same as the entries */
3771 ring_buffer_overrun_cpu(buf
->buffer
, cpu
);
3776 get_total_entries(struct array_buffer
*buf
,
3777 unsigned long *total
, unsigned long *entries
)
3785 for_each_tracing_cpu(cpu
) {
3786 get_total_entries_cpu(buf
, &t
, &e
, cpu
);
3792 unsigned long trace_total_entries_cpu(struct trace_array
*tr
, int cpu
)
3794 unsigned long total
, entries
;
3799 get_total_entries_cpu(&tr
->array_buffer
, &total
, &entries
, cpu
);
3804 unsigned long trace_total_entries(struct trace_array
*tr
)
3806 unsigned long total
, entries
;
3811 get_total_entries(&tr
->array_buffer
, &total
, &entries
);
3816 static void print_lat_help_header(struct seq_file
*m
)
3818 seq_puts(m
, "# _------=> CPU# \n"
3819 "# / _-----=> irqs-off \n"
3820 "# | / _----=> need-resched \n"
3821 "# || / _---=> hardirq/softirq \n"
3822 "# ||| / _--=> preempt-depth \n"
3824 "# cmd pid ||||| time | caller \n"
3825 "# \\ / ||||| \\ | / \n");
3828 static void print_event_info(struct array_buffer
*buf
, struct seq_file
*m
)
3830 unsigned long total
;
3831 unsigned long entries
;
3833 get_total_entries(buf
, &total
, &entries
);
3834 seq_printf(m
, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3835 entries
, total
, num_online_cpus());
3839 static void print_func_help_header(struct array_buffer
*buf
, struct seq_file
*m
,
3842 bool tgid
= flags
& TRACE_ITER_RECORD_TGID
;
3844 print_event_info(buf
, m
);
3846 seq_printf(m
, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid
? " TGID " : "");
3847 seq_printf(m
, "# | | %s | | |\n", tgid
? " | " : "");
3850 static void print_func_help_header_irq(struct array_buffer
*buf
, struct seq_file
*m
,
3853 bool tgid
= flags
& TRACE_ITER_RECORD_TGID
;
3854 const char *space
= " ";
3855 int prec
= tgid
? 12 : 2;
3857 print_event_info(buf
, m
);
3859 seq_printf(m
, "# %.*s _-----=> irqs-off\n", prec
, space
);
3860 seq_printf(m
, "# %.*s / _----=> need-resched\n", prec
, space
);
3861 seq_printf(m
, "# %.*s| / _---=> hardirq/softirq\n", prec
, space
);
3862 seq_printf(m
, "# %.*s|| / _--=> preempt-depth\n", prec
, space
);
3863 seq_printf(m
, "# %.*s||| / delay\n", prec
, space
);
3864 seq_printf(m
, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec
, " TGID ");
3865 seq_printf(m
, "# | | %.*s | |||| | |\n", prec
, " | ");
3869 print_trace_header(struct seq_file
*m
, struct trace_iterator
*iter
)
3871 unsigned long sym_flags
= (global_trace
.trace_flags
& TRACE_ITER_SYM_MASK
);
3872 struct array_buffer
*buf
= iter
->array_buffer
;
3873 struct trace_array_cpu
*data
= per_cpu_ptr(buf
->data
, buf
->cpu
);
3874 struct tracer
*type
= iter
->trace
;
3875 unsigned long entries
;
3876 unsigned long total
;
3877 const char *name
= "preemption";
3881 get_total_entries(buf
, &total
, &entries
);
3883 seq_printf(m
, "# %s latency trace v1.1.5 on %s\n",
3885 seq_puts(m
, "# -----------------------------------"
3886 "---------------------------------\n");
3887 seq_printf(m
, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3888 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3889 nsecs_to_usecs(data
->saved_latency
),
3893 #if defined(CONFIG_PREEMPT_NONE)
3895 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3897 #elif defined(CONFIG_PREEMPT)
3899 #elif defined(CONFIG_PREEMPT_RT)
3904 /* These are reserved for later use */
3907 seq_printf(m
, " #P:%d)\n", num_online_cpus());
3911 seq_puts(m
, "# -----------------\n");
3912 seq_printf(m
, "# | task: %.16s-%d "
3913 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3914 data
->comm
, data
->pid
,
3915 from_kuid_munged(seq_user_ns(m
), data
->uid
), data
->nice
,
3916 data
->policy
, data
->rt_priority
);
3917 seq_puts(m
, "# -----------------\n");
3919 if (data
->critical_start
) {
3920 seq_puts(m
, "# => started at: ");
3921 seq_print_ip_sym(&iter
->seq
, data
->critical_start
, sym_flags
);
3922 trace_print_seq(m
, &iter
->seq
);
3923 seq_puts(m
, "\n# => ended at: ");
3924 seq_print_ip_sym(&iter
->seq
, data
->critical_end
, sym_flags
);
3925 trace_print_seq(m
, &iter
->seq
);
3926 seq_puts(m
, "\n#\n");
3932 static void test_cpu_buff_start(struct trace_iterator
*iter
)
3934 struct trace_seq
*s
= &iter
->seq
;
3935 struct trace_array
*tr
= iter
->tr
;
3937 if (!(tr
->trace_flags
& TRACE_ITER_ANNOTATE
))
3940 if (!(iter
->iter_flags
& TRACE_FILE_ANNOTATE
))
3943 if (cpumask_available(iter
->started
) &&
3944 cpumask_test_cpu(iter
->cpu
, iter
->started
))
3947 if (per_cpu_ptr(iter
->array_buffer
->data
, iter
->cpu
)->skipped_entries
)
3950 if (cpumask_available(iter
->started
))
3951 cpumask_set_cpu(iter
->cpu
, iter
->started
);
3953 /* Don't print started cpu buffer for the first entry of the trace */
3955 trace_seq_printf(s
, "##### CPU %u buffer started ####\n",
3959 static enum print_line_t
print_trace_fmt(struct trace_iterator
*iter
)
3961 struct trace_array
*tr
= iter
->tr
;
3962 struct trace_seq
*s
= &iter
->seq
;
3963 unsigned long sym_flags
= (tr
->trace_flags
& TRACE_ITER_SYM_MASK
);
3964 struct trace_entry
*entry
;
3965 struct trace_event
*event
;
3969 test_cpu_buff_start(iter
);
3971 event
= ftrace_find_event(entry
->type
);
3973 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
3974 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
)
3975 trace_print_lat_context(iter
);
3977 trace_print_context(iter
);
3980 if (trace_seq_has_overflowed(s
))
3981 return TRACE_TYPE_PARTIAL_LINE
;
3984 return event
->funcs
->trace(iter
, sym_flags
, event
);
3986 trace_seq_printf(s
, "Unknown type %d\n", entry
->type
);
3988 return trace_handle_return(s
);
3991 static enum print_line_t
print_raw_fmt(struct trace_iterator
*iter
)
3993 struct trace_array
*tr
= iter
->tr
;
3994 struct trace_seq
*s
= &iter
->seq
;
3995 struct trace_entry
*entry
;
3996 struct trace_event
*event
;
4000 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
)
4001 trace_seq_printf(s
, "%d %d %llu ",
4002 entry
->pid
, iter
->cpu
, iter
->ts
);
4004 if (trace_seq_has_overflowed(s
))
4005 return TRACE_TYPE_PARTIAL_LINE
;
4007 event
= ftrace_find_event(entry
->type
);
4009 return event
->funcs
->raw(iter
, 0, event
);
4011 trace_seq_printf(s
, "%d ?\n", entry
->type
);
4013 return trace_handle_return(s
);
4016 static enum print_line_t
print_hex_fmt(struct trace_iterator
*iter
)
4018 struct trace_array
*tr
= iter
->tr
;
4019 struct trace_seq
*s
= &iter
->seq
;
4020 unsigned char newline
= '\n';
4021 struct trace_entry
*entry
;
4022 struct trace_event
*event
;
4026 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
4027 SEQ_PUT_HEX_FIELD(s
, entry
->pid
);
4028 SEQ_PUT_HEX_FIELD(s
, iter
->cpu
);
4029 SEQ_PUT_HEX_FIELD(s
, iter
->ts
);
4030 if (trace_seq_has_overflowed(s
))
4031 return TRACE_TYPE_PARTIAL_LINE
;
4034 event
= ftrace_find_event(entry
->type
);
4036 enum print_line_t ret
= event
->funcs
->hex(iter
, 0, event
);
4037 if (ret
!= TRACE_TYPE_HANDLED
)
4041 SEQ_PUT_FIELD(s
, newline
);
4043 return trace_handle_return(s
);
4046 static enum print_line_t
print_bin_fmt(struct trace_iterator
*iter
)
4048 struct trace_array
*tr
= iter
->tr
;
4049 struct trace_seq
*s
= &iter
->seq
;
4050 struct trace_entry
*entry
;
4051 struct trace_event
*event
;
4055 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
4056 SEQ_PUT_FIELD(s
, entry
->pid
);
4057 SEQ_PUT_FIELD(s
, iter
->cpu
);
4058 SEQ_PUT_FIELD(s
, iter
->ts
);
4059 if (trace_seq_has_overflowed(s
))
4060 return TRACE_TYPE_PARTIAL_LINE
;
4063 event
= ftrace_find_event(entry
->type
);
4064 return event
? event
->funcs
->binary(iter
, 0, event
) :
4068 int trace_empty(struct trace_iterator
*iter
)
4070 struct ring_buffer_iter
*buf_iter
;
4073 /* If we are looking at one CPU buffer, only check that one */
4074 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
4075 cpu
= iter
->cpu_file
;
4076 buf_iter
= trace_buffer_iter(iter
, cpu
);
4078 if (!ring_buffer_iter_empty(buf_iter
))
4081 if (!ring_buffer_empty_cpu(iter
->array_buffer
->buffer
, cpu
))
4087 for_each_tracing_cpu(cpu
) {
4088 buf_iter
= trace_buffer_iter(iter
, cpu
);
4090 if (!ring_buffer_iter_empty(buf_iter
))
4093 if (!ring_buffer_empty_cpu(iter
->array_buffer
->buffer
, cpu
))
4101 /* Called with trace_event_read_lock() held. */
4102 enum print_line_t
print_trace_line(struct trace_iterator
*iter
)
4104 struct trace_array
*tr
= iter
->tr
;
4105 unsigned long trace_flags
= tr
->trace_flags
;
4106 enum print_line_t ret
;
4108 if (iter
->lost_events
) {
4109 if (iter
->lost_events
== (unsigned long)-1)
4110 trace_seq_printf(&iter
->seq
, "CPU:%d [LOST EVENTS]\n",
4113 trace_seq_printf(&iter
->seq
, "CPU:%d [LOST %lu EVENTS]\n",
4114 iter
->cpu
, iter
->lost_events
);
4115 if (trace_seq_has_overflowed(&iter
->seq
))
4116 return TRACE_TYPE_PARTIAL_LINE
;
4119 if (iter
->trace
&& iter
->trace
->print_line
) {
4120 ret
= iter
->trace
->print_line(iter
);
4121 if (ret
!= TRACE_TYPE_UNHANDLED
)
4125 if (iter
->ent
->type
== TRACE_BPUTS
&&
4126 trace_flags
& TRACE_ITER_PRINTK
&&
4127 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
4128 return trace_print_bputs_msg_only(iter
);
4130 if (iter
->ent
->type
== TRACE_BPRINT
&&
4131 trace_flags
& TRACE_ITER_PRINTK
&&
4132 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
4133 return trace_print_bprintk_msg_only(iter
);
4135 if (iter
->ent
->type
== TRACE_PRINT
&&
4136 trace_flags
& TRACE_ITER_PRINTK
&&
4137 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
4138 return trace_print_printk_msg_only(iter
);
4140 if (trace_flags
& TRACE_ITER_BIN
)
4141 return print_bin_fmt(iter
);
4143 if (trace_flags
& TRACE_ITER_HEX
)
4144 return print_hex_fmt(iter
);
4146 if (trace_flags
& TRACE_ITER_RAW
)
4147 return print_raw_fmt(iter
);
4149 return print_trace_fmt(iter
);
4152 void trace_latency_header(struct seq_file
*m
)
4154 struct trace_iterator
*iter
= m
->private;
4155 struct trace_array
*tr
= iter
->tr
;
4157 /* print nothing if the buffers are empty */
4158 if (trace_empty(iter
))
4161 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
)
4162 print_trace_header(m
, iter
);
4164 if (!(tr
->trace_flags
& TRACE_ITER_VERBOSE
))
4165 print_lat_help_header(m
);
4168 void trace_default_header(struct seq_file
*m
)
4170 struct trace_iterator
*iter
= m
->private;
4171 struct trace_array
*tr
= iter
->tr
;
4172 unsigned long trace_flags
= tr
->trace_flags
;
4174 if (!(trace_flags
& TRACE_ITER_CONTEXT_INFO
))
4177 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
) {
4178 /* print nothing if the buffers are empty */
4179 if (trace_empty(iter
))
4181 print_trace_header(m
, iter
);
4182 if (!(trace_flags
& TRACE_ITER_VERBOSE
))
4183 print_lat_help_header(m
);
4185 if (!(trace_flags
& TRACE_ITER_VERBOSE
)) {
4186 if (trace_flags
& TRACE_ITER_IRQ_INFO
)
4187 print_func_help_header_irq(iter
->array_buffer
,
4190 print_func_help_header(iter
->array_buffer
, m
,
4196 static void test_ftrace_alive(struct seq_file
*m
)
4198 if (!ftrace_is_dead())
4200 seq_puts(m
, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4201 "# MAY BE MISSING FUNCTION EVENTS\n");
4204 #ifdef CONFIG_TRACER_MAX_TRACE
4205 static void show_snapshot_main_help(struct seq_file
*m
)
4207 seq_puts(m
, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4208 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4209 "# Takes a snapshot of the main buffer.\n"
4210 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4211 "# (Doesn't have to be '2' works with any number that\n"
4212 "# is not a '0' or '1')\n");
4215 static void show_snapshot_percpu_help(struct seq_file
*m
)
4217 seq_puts(m
, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4218 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4219 seq_puts(m
, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4220 "# Takes a snapshot of the main buffer for this cpu.\n");
4222 seq_puts(m
, "# echo 1 > snapshot : Not supported with this kernel.\n"
4223 "# Must use main snapshot file to allocate.\n");
4225 seq_puts(m
, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4226 "# (Doesn't have to be '2' works with any number that\n"
4227 "# is not a '0' or '1')\n");
4230 static void print_snapshot_help(struct seq_file
*m
, struct trace_iterator
*iter
)
4232 if (iter
->tr
->allocated_snapshot
)
4233 seq_puts(m
, "#\n# * Snapshot is allocated *\n#\n");
4235 seq_puts(m
, "#\n# * Snapshot is freed *\n#\n");
4237 seq_puts(m
, "# Snapshot commands:\n");
4238 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
4239 show_snapshot_main_help(m
);
4241 show_snapshot_percpu_help(m
);
4244 /* Should never be called */
4245 static inline void print_snapshot_help(struct seq_file
*m
, struct trace_iterator
*iter
) { }
4248 static int s_show(struct seq_file
*m
, void *v
)
4250 struct trace_iterator
*iter
= v
;
4253 if (iter
->ent
== NULL
) {
4255 seq_printf(m
, "# tracer: %s\n", iter
->trace
->name
);
4257 test_ftrace_alive(m
);
4259 if (iter
->snapshot
&& trace_empty(iter
))
4260 print_snapshot_help(m
, iter
);
4261 else if (iter
->trace
&& iter
->trace
->print_header
)
4262 iter
->trace
->print_header(m
);
4264 trace_default_header(m
);
4266 } else if (iter
->leftover
) {
4268 * If we filled the seq_file buffer earlier, we
4269 * want to just show it now.
4271 ret
= trace_print_seq(m
, &iter
->seq
);
4273 /* ret should this time be zero, but you never know */
4274 iter
->leftover
= ret
;
4277 print_trace_line(iter
);
4278 ret
= trace_print_seq(m
, &iter
->seq
);
4280 * If we overflow the seq_file buffer, then it will
4281 * ask us for this data again at start up.
4283 * ret is 0 if seq_file write succeeded.
4286 iter
->leftover
= ret
;
/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
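/*
 * Illustrative sketch (not part of this file): the per-cpu files store
 * "cpu + 1" in i_cdev when they are created (see trace_create_cpu_file()),
 * so a NULL i_cdev can keep meaning "all CPUs":
 *
 *	inode->i_cdev = (void *)(cpu + 1);	// encode at file creation
 *	cpu = (long)inode->i_cdev - 1;		// decode, as done above
 */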
4303 static const struct seq_operations tracer_seq_ops
= {
4310 static struct trace_iterator
*
4311 __tracing_open(struct inode
*inode
, struct file
*file
, bool snapshot
)
4313 struct trace_array
*tr
= inode
->i_private
;
4314 struct trace_iterator
*iter
;
4317 if (tracing_disabled
)
4318 return ERR_PTR(-ENODEV
);
4320 iter
= __seq_open_private(file
, &tracer_seq_ops
, sizeof(*iter
));
4322 return ERR_PTR(-ENOMEM
);
4324 iter
->buffer_iter
= kcalloc(nr_cpu_ids
, sizeof(*iter
->buffer_iter
),
4326 if (!iter
->buffer_iter
)
4330 * trace_find_next_entry() may need to save off iter->ent.
4331 * It will place it into the iter->temp buffer. As most
4332 * events are less than 128, allocate a buffer of that size.
4333 * If one is greater, then trace_find_next_entry() will
4334 * allocate a new buffer to adjust for the bigger iter->ent.
4335 * It's not critical if it fails to get allocated here.
4337 iter
->temp
= kmalloc(128, GFP_KERNEL
);
4339 iter
->temp_size
= 128;
4342 * We make a copy of the current tracer to avoid concurrent
4343 * changes on it while we are reading.
4345 mutex_lock(&trace_types_lock
);
4346 iter
->trace
= kzalloc(sizeof(*iter
->trace
), GFP_KERNEL
);
4350 *iter
->trace
= *tr
->current_trace
;
4352 if (!zalloc_cpumask_var(&iter
->started
, GFP_KERNEL
))
4357 #ifdef CONFIG_TRACER_MAX_TRACE
4358 /* Currently only the top directory has a snapshot */
4359 if (tr
->current_trace
->print_max
|| snapshot
)
4360 iter
->array_buffer
= &tr
->max_buffer
;
4363 iter
->array_buffer
= &tr
->array_buffer
;
4364 iter
->snapshot
= snapshot
;
4366 iter
->cpu_file
= tracing_get_cpu(inode
);
4367 mutex_init(&iter
->mutex
);
4369 /* Notify the tracer early; before we stop tracing. */
4370 if (iter
->trace
->open
)
4371 iter
->trace
->open(iter
);
4373 /* Annotate start of buffers if we had overruns */
4374 if (ring_buffer_overruns(iter
->array_buffer
->buffer
))
4375 iter
->iter_flags
|= TRACE_FILE_ANNOTATE
;
4377 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4378 if (trace_clocks
[tr
->clock_id
].in_ns
)
4379 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
4382 * If pause-on-trace is enabled, then stop the trace while
4383 * dumping, unless this is the "snapshot" file
4385 if (!iter
->snapshot
&& (tr
->trace_flags
& TRACE_ITER_PAUSE_ON_TRACE
))
4386 tracing_stop_tr(tr
);
4388 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
) {
4389 for_each_tracing_cpu(cpu
) {
4390 iter
->buffer_iter
[cpu
] =
4391 ring_buffer_read_prepare(iter
->array_buffer
->buffer
,
4394 ring_buffer_read_prepare_sync();
4395 for_each_tracing_cpu(cpu
) {
4396 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
4397 tracing_iter_reset(iter
, cpu
);
4400 cpu
= iter
->cpu_file
;
4401 iter
->buffer_iter
[cpu
] =
4402 ring_buffer_read_prepare(iter
->array_buffer
->buffer
,
4404 ring_buffer_read_prepare_sync();
4405 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
4406 tracing_iter_reset(iter
, cpu
);
4409 mutex_unlock(&trace_types_lock
);
4414 mutex_unlock(&trace_types_lock
);
4417 kfree(iter
->buffer_iter
);
4419 seq_release_private(inode
, file
);
4420 return ERR_PTR(-ENOMEM
);
4423 int tracing_open_generic(struct inode
*inode
, struct file
*filp
)
4427 ret
= tracing_check_open_get_tr(NULL
);
4431 filp
->private_data
= inode
->i_private
;
4435 bool tracing_is_disabled(void)
4437 return (tracing_disabled
) ? true: false;
4441 * Open and update trace_array ref count.
4442 * Must have the current trace_array passed to it.
4444 int tracing_open_generic_tr(struct inode
*inode
, struct file
*filp
)
4446 struct trace_array
*tr
= inode
->i_private
;
4449 ret
= tracing_check_open_get_tr(tr
);
4453 filp
->private_data
= inode
->i_private
;
4458 static int tracing_release(struct inode
*inode
, struct file
*file
)
4460 struct trace_array
*tr
= inode
->i_private
;
4461 struct seq_file
*m
= file
->private_data
;
4462 struct trace_iterator
*iter
;
4465 if (!(file
->f_mode
& FMODE_READ
)) {
4466 trace_array_put(tr
);
4470 /* Writes do not use seq_file */
4472 mutex_lock(&trace_types_lock
);
4474 for_each_tracing_cpu(cpu
) {
4475 if (iter
->buffer_iter
[cpu
])
4476 ring_buffer_read_finish(iter
->buffer_iter
[cpu
]);
4479 if (iter
->trace
&& iter
->trace
->close
)
4480 iter
->trace
->close(iter
);
4482 if (!iter
->snapshot
&& tr
->stop_count
)
4483 /* reenable tracing if it was previously enabled */
4484 tracing_start_tr(tr
);
4486 __trace_array_put(tr
);
4488 mutex_unlock(&trace_types_lock
);
4490 mutex_destroy(&iter
->mutex
);
4491 free_cpumask_var(iter
->started
);
4494 kfree(iter
->buffer_iter
);
4495 seq_release_private(inode
, file
);
static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}
4517 static int tracing_open(struct inode
*inode
, struct file
*file
)
4519 struct trace_array
*tr
= inode
->i_private
;
4520 struct trace_iterator
*iter
;
4523 ret
= tracing_check_open_get_tr(tr
);
4527 /* If this file was open for write, then erase contents */
4528 if ((file
->f_mode
& FMODE_WRITE
) && (file
->f_flags
& O_TRUNC
)) {
4529 int cpu
= tracing_get_cpu(inode
);
4530 struct array_buffer
*trace_buf
= &tr
->array_buffer
;
4532 #ifdef CONFIG_TRACER_MAX_TRACE
4533 if (tr
->current_trace
->print_max
)
4534 trace_buf
= &tr
->max_buffer
;
4537 if (cpu
== RING_BUFFER_ALL_CPUS
)
4538 tracing_reset_online_cpus(trace_buf
);
4540 tracing_reset_cpu(trace_buf
, cpu
);
4543 if (file
->f_mode
& FMODE_READ
) {
4544 iter
= __tracing_open(inode
, file
, false);
4546 ret
= PTR_ERR(iter
);
4547 else if (tr
->trace_flags
& TRACE_ITER_LATENCY_FMT
)
4548 iter
->iter_flags
|= TRACE_FILE_LAT_FMT
;
4552 trace_array_put(tr
);
4558 * Some tracers are not suitable for instance buffers.
4559 * A tracer is always available for the global array (toplevel)
4560 * or if it explicitly states that it is.
4563 trace_ok_for_array(struct tracer
*t
, struct trace_array
*tr
)
4565 return (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) || t
->allow_instances
;
4568 /* Find the next tracer that this trace array may use */
4569 static struct tracer
*
4570 get_tracer_for_array(struct trace_array
*tr
, struct tracer
*t
)
4572 while (t
&& !trace_ok_for_array(t
, tr
))
4579 t_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
4581 struct trace_array
*tr
= m
->private;
4582 struct tracer
*t
= v
;
4587 t
= get_tracer_for_array(tr
, t
->next
);
4592 static void *t_start(struct seq_file
*m
, loff_t
*pos
)
4594 struct trace_array
*tr
= m
->private;
4598 mutex_lock(&trace_types_lock
);
4600 t
= get_tracer_for_array(tr
, trace_types
);
4601 for (; t
&& l
< *pos
; t
= t_next(m
, t
, &l
))
4607 static void t_stop(struct seq_file
*m
, void *p
)
4609 mutex_unlock(&trace_types_lock
);
4612 static int t_show(struct seq_file
*m
, void *v
)
4614 struct tracer
*t
= v
;
4619 seq_puts(m
, t
->name
);
4628 static const struct seq_operations show_traces_seq_ops
= {
4635 static int show_traces_open(struct inode
*inode
, struct file
*file
)
4637 struct trace_array
*tr
= inode
->i_private
;
4641 ret
= tracing_check_open_get_tr(tr
);
4645 ret
= seq_open(file
, &show_traces_seq_ops
);
4647 trace_array_put(tr
);
4651 m
= file
->private_data
;
static int show_traces_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return seq_release(inode, file);
}
4666 tracing_write_stub(struct file
*filp
, const char __user
*ubuf
,
4667 size_t count
, loff_t
*ppos
)
4672 loff_t
tracing_lseek(struct file
*file
, loff_t offset
, int whence
)
4676 if (file
->f_mode
& FMODE_READ
)
4677 ret
= seq_lseek(file
, offset
, whence
);
4679 file
->f_pos
= ret
= 0;
static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= show_traces_release,
};
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	char *mask_str;
	int len;

	len = snprintf(NULL, 0, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
	mask_str = kmalloc(len, GFP_KERNEL);
	if (!mask_str)
		return -ENOMEM;

	len = snprintf(mask_str, len, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_err:
	kfree(mask_str);

	return count;
}
int tracing_set_cpumask(struct trace_array *tr,
			cpumask_var_t tracing_cpumask_new)
{
	int cpu;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	return 0;
}
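/*
 * The disabled counter / record_disable dance above quiesces per-CPU
 * writers while a CPU is being flipped in the tracing mask.  From user
 * space the mask is just a hex cpumask written to tracing_cpumask,
 * e.g. (illustrative):
 *
 *	# echo 3 > tracing_cpumask	# trace only CPUs 0 and 1
 */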
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_free;

	err = tracing_set_cpumask(tr, tracing_cpumask_new);
	if (err)
		goto err_free;

	free_cpumask_var(tracing_cpumask_new);

	return count;

err_free:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}
static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tracer_flags->trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}
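/*
 * Option names are resolved in two steps: trace_set_options() below
 * first matches the generic trace_options[] strings and only falls
 * back to set_tracer_option() for flags private to the current tracer
 * (for example the funcgraph-* options of the function_graph tracer).
 * A leading "no" clears the option instead of setting it.
 */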
/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	int *map;

	if ((mask == TRACE_ITER_RECORD_TGID) ||
	    (mask == TRACE_ITER_RECORD_CMD))
		lockdep_assert_held(&event_mutex);

	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_RECORD_TGID) {
		if (!tgid_map) {
			tgid_map_max = pid_max;
			map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
				       GFP_KERNEL);

			/*
			 * Pairs with smp_load_acquire() in
			 * trace_find_tgid_ptr() to ensure that if it observes
			 * the tgid_map we just allocated then it also observes
			 * the corresponding tgid_map_max value.
			 */
			smp_store_release(&tgid_map, map);
		}
		if (!tgid_map) {
			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
			return -ENOMEM;
		}

		trace_event_enable_tgid_record(enabled);
	}

	if (mask == TRACE_ITER_EVENT_FORK)
		trace_event_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_FUNC_FORK)
		ftrace_pid_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}
int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret;
	size_t orig_len = strlen(option);
	int len;

	cmp = strstrip(option);

	len = str_has_prefix(cmp, "no");
	if (len)
		neg = 1;

	cmp += len;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = match_string(trace_options, -1, cmp);
	/* If no option could be set, test the specific tracer options */
	if (ret < 0)
		ret = set_tracer_option(tr, cmp, neg);
	else
		ret = set_tracer_flag(tr, 1 << ret, !neg);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}

static void __init apply_trace_boot_options(void)
{
	char *buf = trace_boot_options_buf;
	char *option;

	while (true) {
		option = strsep(&buf, ",");

		if (!option)
			break;

		if (*option)
			trace_set_options(&global_trace, option);

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}
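/*
 * apply_trace_boot_options() consumes the comma-separated list captured
 * from the trace_options= kernel command line parameter, e.g.
 * (illustrative) booting with:
 *
 *	trace_options=sym-addr,nooverwrite
 *
 * applies each entry through trace_set_options() exactly as if it had
 * been written to the trace_options file at run time.
 */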
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
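/*
 * tracing_iter_fops is the plumbing behind the trace_options file:
 * reads list every flag (prefixed with "no" when clear) via
 * tracing_trace_options_show(), and writes feed trace_set_options().
 */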
5049 static const char readme_msg
[] =
5050 "tracing mini-HOWTO:\n\n"
5051 "# echo 0 > tracing_on : quick way to disable tracing\n"
5052 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5053 " Important files:\n"
5054 " trace\t\t\t- The static contents of the buffer\n"
5055 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5056 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5057 " current_tracer\t- function and latency tracers\n"
5058 " available_tracers\t- list of configured tracers for current_tracer\n"
5059 " error_log\t- error log for failed commands (that support it)\n"
5060 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5061 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5062 " trace_clock\t\t-change the clock used to order events\n"
5063 " local: Per cpu clock but may not be synced across CPUs\n"
5064 " global: Synced across CPUs but slows tracing down.\n"
5065 " counter: Not a clock, but just an increment\n"
5066 " uptime: Jiffy counter from time of boot\n"
5067 " perf: Same clock that perf events use\n"
5068 #ifdef CONFIG_X86_64
5069 " x86-tsc: TSC cycle counter\n"
5071 "\n timestamp_mode\t-view the mode used to timestamp events\n"
5072 " delta: Delta difference against a buffer-wide timestamp\n"
5073 " absolute: Absolute (standalone) timestamp\n"
5074 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5075 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5076 " tracing_cpumask\t- Limit which CPUs to trace\n"
5077 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5078 "\t\t\t Remove sub-buffer with rmdir\n"
5079 " trace_options\t\t- Set format or modify how tracing happens\n"
5080 "\t\t\t Disable an option by prefixing 'no' to the\n"
5081 "\t\t\t option name\n"
5082 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5083 #ifdef CONFIG_DYNAMIC_FTRACE
5084 "\n available_filter_functions - list of functions that can be filtered on\n"
5085 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5086 "\t\t\t functions\n"
5087 "\t accepts: func_full_name or glob-matching-pattern\n"
5088 "\t modules: Can select a group via module\n"
5089 "\t Format: :mod:<module-name>\n"
5090 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5091 "\t triggers: a command to perform when function is hit\n"
5092 "\t Format: <function>:<trigger>[:count]\n"
5093 "\t trigger: traceon, traceoff\n"
5094 "\t\t enable_event:<system>:<event>\n"
5095 "\t\t disable_event:<system>:<event>\n"
5096 #ifdef CONFIG_STACKTRACE
5099 #ifdef CONFIG_TRACER_SNAPSHOT
5104 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5105 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5106 "\t The first one will disable tracing every time do_fault is hit\n"
5107 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5108 "\t The first time do trap is hit and it disables tracing, the\n"
5109 "\t counter will decrement to 2. If tracing is already disabled,\n"
5110 "\t the counter will not decrement. It only decrements when the\n"
5111 "\t trigger did work\n"
5112 "\t To remove trigger without count:\n"
5113 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5114 "\t To remove trigger with a count:\n"
5115 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5116 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5117 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5118 "\t modules: Can select a group via module command :mod:\n"
5119 "\t Does not accept triggers\n"
5120 #endif /* CONFIG_DYNAMIC_FTRACE */
5121 #ifdef CONFIG_FUNCTION_TRACER
5122 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5124 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5127 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5128 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5129 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5130 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5132 #ifdef CONFIG_TRACER_SNAPSHOT
5133 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5134 "\t\t\t snapshot buffer. Read the contents for more\n"
5135 "\t\t\t information\n"
5137 #ifdef CONFIG_STACK_TRACER
5138 " stack_trace\t\t- Shows the max stack trace when active\n"
5139 " stack_max_size\t- Shows current max stack size that was traced\n"
5140 "\t\t\t Write into this file to reset the max size (trigger a\n"
5141 "\t\t\t new trace)\n"
5142 #ifdef CONFIG_DYNAMIC_FTRACE
5143 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5146 #endif /* CONFIG_STACK_TRACER */
5147 #ifdef CONFIG_DYNAMIC_EVENTS
5148 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5149 "\t\t\t Write into this file to define/undefine new trace events.\n"
5151 #ifdef CONFIG_KPROBE_EVENTS
5152 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5153 "\t\t\t Write into this file to define/undefine new trace events.\n"
5155 #ifdef CONFIG_UPROBE_EVENTS
5156 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5157 "\t\t\t Write into this file to define/undefine new trace events.\n"
5159 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5160 "\t accepts: event-definitions (one definition per line)\n"
5161 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5162 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5163 #ifdef CONFIG_HIST_TRIGGERS
5164 "\t s:[synthetic/]<event> <field> [<field>]\n"
5166 "\t -:[<group>/]<event>\n"
5167 #ifdef CONFIG_KPROBE_EVENTS
5168 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5169 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5171 #ifdef CONFIG_UPROBE_EVENTS
5172 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5174 "\t args: <name>=fetcharg[:type]\n"
5175 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
5176 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5177 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5179 "\t $stack<index>, $stack, $retval, $comm,\n"
5181 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5182 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5183 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5184 "\t <type>\\[<array-size>\\]\n"
5185 #ifdef CONFIG_HIST_TRIGGERS
5186 "\t field: <stype> <name>;\n"
5187 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5188 "\t [unsigned] char/int/long\n"
5191 " events/\t\t- Directory containing all trace event subsystems:\n"
5192 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5193 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5194 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5196 " filter\t\t- If set, only events passing filter are traced\n"
5197 " events/<system>/<event>/\t- Directory containing control files for\n"
5199 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5200 " filter\t\t- If set, only events passing filter are traced\n"
5201 " trigger\t\t- If set, a command to perform when event is hit\n"
5202 "\t Format: <trigger>[:count][if <filter>]\n"
5203 "\t trigger: traceon, traceoff\n"
5204 "\t enable_event:<system>:<event>\n"
5205 "\t disable_event:<system>:<event>\n"
5206 #ifdef CONFIG_HIST_TRIGGERS
5207 "\t enable_hist:<system>:<event>\n"
5208 "\t disable_hist:<system>:<event>\n"
5210 #ifdef CONFIG_STACKTRACE
5213 #ifdef CONFIG_TRACER_SNAPSHOT
5216 #ifdef CONFIG_HIST_TRIGGERS
5217 "\t\t hist (see below)\n"
5219 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5220 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5221 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5222 "\t events/block/block_unplug/trigger\n"
5223 "\t The first disables tracing every time block_unplug is hit.\n"
5224 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5225 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5226 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5227 "\t Like function triggers, the counter is only decremented if it\n"
5228 "\t enabled or disabled tracing.\n"
5229 "\t To remove a trigger without a count:\n"
5230 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5231 "\t To remove a trigger with a count:\n"
5232 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5233 "\t Filters can be ignored when removing a trigger.\n"
5234 #ifdef CONFIG_HIST_TRIGGERS
5235 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5236 "\t Format: hist:keys=<field1[,field2,...]>\n"
5237 "\t [:values=<field1[,field2,...]>]\n"
5238 "\t [:sort=<field1[,field2,...]>]\n"
5239 "\t [:size=#entries]\n"
5240 "\t [:pause][:continue][:clear]\n"
5241 "\t [:name=histname1]\n"
5242 "\t [:<handler>.<action>]\n"
5243 "\t [if <filter>]\n\n"
5244 "\t When a matching event is hit, an entry is added to a hash\n"
5245 "\t table using the key(s) and value(s) named, and the value of a\n"
5246 "\t sum called 'hitcount' is incremented. Keys and values\n"
5247 "\t correspond to fields in the event's format description. Keys\n"
5248 "\t can be any field, or the special string 'stacktrace'.\n"
5249 "\t Compound keys consisting of up to two fields can be specified\n"
5250 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5251 "\t fields. Sort keys consisting of up to two fields can be\n"
5252 "\t specified using the 'sort' keyword. The sort direction can\n"
5253 "\t be modified by appending '.descending' or '.ascending' to a\n"
5254 "\t sort field. The 'size' parameter can be used to specify more\n"
5255 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5256 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5257 "\t its histogram data will be shared with other triggers of the\n"
5258 "\t same name, and trigger hits will update this common data.\n\n"
5259 "\t Reading the 'hist' file for the event will dump the hash\n"
5260 "\t table in its entirety to stdout. If there are multiple hist\n"
5261 "\t triggers attached to an event, there will be a table for each\n"
5262 "\t trigger in the output. The table displayed for a named\n"
5263 "\t trigger will be the same as any other instance having the\n"
5264 "\t same name. The default format used to display a given field\n"
5265 "\t can be modified by appending any of the following modifiers\n"
5266 "\t to the field name, as applicable:\n\n"
5267 "\t .hex display a number as a hex value\n"
5268 "\t .sym display an address as a symbol\n"
5269 "\t .sym-offset display an address as a symbol and offset\n"
5270 "\t .execname display a common_pid as a program name\n"
5271 "\t .syscall display a syscall id as a syscall name\n"
5272 "\t .log2 display log2 value rather than raw number\n"
5273 "\t .usecs display a common_timestamp in microseconds\n\n"
5274 "\t The 'pause' parameter can be used to pause an existing hist\n"
5275 "\t trigger or to start a hist trigger but not log any events\n"
5276 "\t until told to do so. 'continue' can be used to start or\n"
5277 "\t restart a paused hist trigger.\n\n"
5278 "\t The 'clear' parameter will clear the contents of a running\n"
5279 "\t hist trigger and leave its current paused/active state\n"
5281 "\t The enable_hist and disable_hist triggers can be used to\n"
5282 "\t have one event conditionally start and stop another event's\n"
5283 "\t already-attached hist trigger. The syntax is analogous to\n"
5284 "\t the enable_event and disable_event triggers.\n\n"
5285 "\t Hist trigger handlers and actions are executed whenever a\n"
5286 "\t a histogram entry is added or updated. They take the form:\n\n"
5287 "\t <handler>.<action>\n\n"
5288 "\t The available handlers are:\n\n"
5289 "\t onmatch(matching.event) - invoke on addition or update\n"
5290 "\t onmax(var) - invoke if var exceeds current max\n"
5291 "\t onchange(var) - invoke action if var changes\n\n"
5292 "\t The available actions are:\n\n"
5293 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5294 "\t save(field,...) - save current event fields\n"
5295 #ifdef CONFIG_TRACER_SNAPSHOT
5296 "\t snapshot() - snapshot the trace buffer\n\n"
5298 #ifdef CONFIG_SYNTH_EVENTS
5299 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5300 "\t Write into this file to define/undefine new synthetic events.\n"
5301 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};
5320 static void *saved_tgids_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
5324 return trace_find_tgid_ptr(pid
);
5327 static void *saved_tgids_start(struct seq_file
*m
, loff_t
*pos
)
5331 return trace_find_tgid_ptr(pid
);
5334 static void saved_tgids_stop(struct seq_file
*m
, void *v
)
5338 static int saved_tgids_show(struct seq_file
*m
, void *v
)
5340 int *entry
= (int *)v
;
5341 int pid
= entry
- tgid_map
;
5347 seq_printf(m
, "%d %d\n", pid
, tgid
);
5351 static const struct seq_operations tracing_saved_tgids_seq_ops
= {
5352 .start
= saved_tgids_start
,
5353 .stop
= saved_tgids_stop
,
5354 .next
= saved_tgids_next
,
5355 .show
= saved_tgids_show
,
5358 static int tracing_saved_tgids_open(struct inode
*inode
, struct file
*filp
)
5362 ret
= tracing_check_open_get_tr(NULL
);
5366 return seq_open(filp
, &tracing_saved_tgids_seq_ops
);
5370 static const struct file_operations tracing_saved_tgids_fops
= {
5371 .open
= tracing_saved_tgids_open
,
5373 .llseek
= seq_lseek
,
5374 .release
= seq_release
,
5377 static void *saved_cmdlines_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
5379 unsigned int *ptr
= v
;
5381 if (*pos
|| m
->count
)
5386 for (; ptr
< &savedcmd
->map_cmdline_to_pid
[savedcmd
->cmdline_num
];
5388 if (*ptr
== -1 || *ptr
== NO_CMDLINE_MAP
)
5397 static void *saved_cmdlines_start(struct seq_file
*m
, loff_t
*pos
)
5403 arch_spin_lock(&trace_cmdline_lock
);
5405 v
= &savedcmd
->map_cmdline_to_pid
[0];
5407 v
= saved_cmdlines_next(m
, v
, &l
);
5415 static void saved_cmdlines_stop(struct seq_file
*m
, void *v
)
5417 arch_spin_unlock(&trace_cmdline_lock
);
5421 static int saved_cmdlines_show(struct seq_file
*m
, void *v
)
5423 char buf
[TASK_COMM_LEN
];
5424 unsigned int *pid
= v
;
5426 __trace_find_cmdline(*pid
, buf
);
5427 seq_printf(m
, "%d %s\n", *pid
, buf
);
5431 static const struct seq_operations tracing_saved_cmdlines_seq_ops
= {
5432 .start
= saved_cmdlines_start
,
5433 .next
= saved_cmdlines_next
,
5434 .stop
= saved_cmdlines_stop
,
5435 .show
= saved_cmdlines_show
,
5438 static int tracing_saved_cmdlines_open(struct inode
*inode
, struct file
*filp
)
5442 ret
= tracing_check_open_get_tr(NULL
);
5446 return seq_open(filp
, &tracing_saved_cmdlines_seq_ops
);
5449 static const struct file_operations tracing_saved_cmdlines_fops
= {
5450 .open
= tracing_saved_cmdlines_open
,
5452 .llseek
= seq_lseek
,
5453 .release
= seq_release
,
5457 tracing_saved_cmdlines_size_read(struct file
*filp
, char __user
*ubuf
,
5458 size_t cnt
, loff_t
*ppos
)
5463 arch_spin_lock(&trace_cmdline_lock
);
5464 r
= scnprintf(buf
, sizeof(buf
), "%u\n", savedcmd
->cmdline_num
);
5465 arch_spin_unlock(&trace_cmdline_lock
);
5467 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
5470 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer
*s
)
5472 kfree(s
->saved_cmdlines
);
5473 kfree(s
->map_cmdline_to_pid
);
5477 static int tracing_resize_saved_cmdlines(unsigned int val
)
5479 struct saved_cmdlines_buffer
*s
, *savedcmd_temp
;
5481 s
= kmalloc(sizeof(*s
), GFP_KERNEL
);
5485 if (allocate_cmdlines_buffer(val
, s
) < 0) {
5490 arch_spin_lock(&trace_cmdline_lock
);
5491 savedcmd_temp
= savedcmd
;
5493 arch_spin_unlock(&trace_cmdline_lock
);
5494 free_saved_cmdlines_buffer(savedcmd_temp
);
5500 tracing_saved_cmdlines_size_write(struct file
*filp
, const char __user
*ubuf
,
5501 size_t cnt
, loff_t
*ppos
)
5506 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
5510 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
5511 if (!val
|| val
> PID_MAX_DEFAULT
)
5514 ret
= tracing_resize_saved_cmdlines((unsigned int)val
);
5523 static const struct file_operations tracing_saved_cmdlines_size_fops
= {
5524 .open
= tracing_open_generic
,
5525 .read
= tracing_saved_cmdlines_size_read
,
5526 .write
= tracing_saved_cmdlines_size_write
,
5529 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5530 static union trace_eval_map_item
*
5531 update_eval_map(union trace_eval_map_item
*ptr
)
5533 if (!ptr
->map
.eval_string
) {
5534 if (ptr
->tail
.next
) {
5535 ptr
= ptr
->tail
.next
;
5536 /* Set ptr to the next real item (skip head) */
5544 static void *eval_map_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
5546 union trace_eval_map_item
*ptr
= v
;
5549 * Paranoid! If ptr points to end, we don't want to increment past it.
5550 * This really should never happen.
5553 ptr
= update_eval_map(ptr
);
5554 if (WARN_ON_ONCE(!ptr
))
5558 ptr
= update_eval_map(ptr
);
5563 static void *eval_map_start(struct seq_file
*m
, loff_t
*pos
)
5565 union trace_eval_map_item
*v
;
5568 mutex_lock(&trace_eval_mutex
);
5570 v
= trace_eval_maps
;
5574 while (v
&& l
< *pos
) {
5575 v
= eval_map_next(m
, v
, &l
);
5581 static void eval_map_stop(struct seq_file
*m
, void *v
)
5583 mutex_unlock(&trace_eval_mutex
);
5586 static int eval_map_show(struct seq_file
*m
, void *v
)
5588 union trace_eval_map_item
*ptr
= v
;
5590 seq_printf(m
, "%s %ld (%s)\n",
5591 ptr
->map
.eval_string
, ptr
->map
.eval_value
,
5597 static const struct seq_operations tracing_eval_map_seq_ops
= {
5598 .start
= eval_map_start
,
5599 .next
= eval_map_next
,
5600 .stop
= eval_map_stop
,
5601 .show
= eval_map_show
,
5604 static int tracing_eval_map_open(struct inode
*inode
, struct file
*filp
)
5608 ret
= tracing_check_open_get_tr(NULL
);
5612 return seq_open(filp
, &tracing_eval_map_seq_ops
);
5615 static const struct file_operations tracing_eval_map_fops
= {
5616 .open
= tracing_eval_map_open
,
5618 .llseek
= seq_lseek
,
5619 .release
= seq_release
,
5622 static inline union trace_eval_map_item
*
5623 trace_eval_jmp_to_tail(union trace_eval_map_item
*ptr
)
5625 /* Return tail of array given the head */
5626 return ptr
+ ptr
->head
.length
+ 1;
5630 trace_insert_eval_map_file(struct module
*mod
, struct trace_eval_map
**start
,
5633 struct trace_eval_map
**stop
;
5634 struct trace_eval_map
**map
;
5635 union trace_eval_map_item
*map_array
;
5636 union trace_eval_map_item
*ptr
;
5641 * The trace_eval_maps contains the map plus a head and tail item,
5642 * where the head holds the module and length of array, and the
5643 * tail holds a pointer to the next list.
5645 map_array
= kmalloc_array(len
+ 2, sizeof(*map_array
), GFP_KERNEL
);
5647 pr_warn("Unable to allocate trace eval mapping\n");
5651 mutex_lock(&trace_eval_mutex
);
5653 if (!trace_eval_maps
)
5654 trace_eval_maps
= map_array
;
5656 ptr
= trace_eval_maps
;
5658 ptr
= trace_eval_jmp_to_tail(ptr
);
5659 if (!ptr
->tail
.next
)
5661 ptr
= ptr
->tail
.next
;
5664 ptr
->tail
.next
= map_array
;
5666 map_array
->head
.mod
= mod
;
5667 map_array
->head
.length
= len
;
5670 for (map
= start
; (unsigned long)map
< (unsigned long)stop
; map
++) {
5671 map_array
->map
= **map
;
5674 memset(map_array
, 0, sizeof(*map_array
));
5676 mutex_unlock(&trace_eval_mutex
);
5679 static void trace_create_eval_file(struct dentry
*d_tracer
)
5681 trace_create_file("eval_map", 0444, d_tracer
,
5682 NULL
, &tracing_eval_map_fops
);
5685 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5686 static inline void trace_create_eval_file(struct dentry
*d_tracer
) { }
5687 static inline void trace_insert_eval_map_file(struct module
*mod
,
5688 struct trace_eval_map
**start
, int len
) { }
5689 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5691 static void trace_insert_eval_map(struct module
*mod
,
5692 struct trace_eval_map
**start
, int len
)
5694 struct trace_eval_map
**map
;
5701 trace_event_eval_update(map
, len
);
5703 trace_insert_eval_map_file(mod
, start
, len
);
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}
#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @tr's buffer to the size of @size_tr's entries */
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->array_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->array_buffer,
						     &tr->array_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snap shot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->array_buffer, size);
	else
		per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;

	return ret;
}
ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
				  unsigned long size, int cpu_id)
{
	int ret;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure, this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}


/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save on memory when the tracing is never used on a system with it
 * configured in. The ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}
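/*
 * Typical caller pattern (illustrative sketch only):
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *	... then enable the tracer or event ...
 *
 * i.e. expand the boot-time minimal buffers before anything starts
 * writing events into them.
 */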
struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}
int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

#ifdef CONFIG_TRACER_SNAPSHOT
	if (t->use_max_tr) {
		arch_spin_lock(&tr->max_lock);
		if (tr->cond_snapshot)
			ret = -EBUSY;
		arch_spin_unlock(&tr->max_lock);
		if (ret)
			goto out;
	}
#endif
	/* Some tracers won't work on kernel command line */
	if (system_state < SYSTEM_RUNNING && t->noboot) {
		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
			t->name);
		goto out;
	}

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->trace_ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_rcu */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronized_sched() is sufficient.
		 */
		synchronize_rcu();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = tracing_alloc_snapshot_instance(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
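/*
 * tracing_set_tracer() is where a write to current_tracer ends up,
 * e.g. (illustrative):
 *
 *	# echo function_graph > current_tracer
 *
 * It expands the buffers if needed, tears the old tracer down to
 * nop_trace, allocates or frees the snapshot buffer as the new tracer
 * requires, and only then installs and initializes the new tracer.
 */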
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}
6050 tracing_nsecs_read(unsigned long *ptr
, char __user
*ubuf
,
6051 size_t cnt
, loff_t
*ppos
)
6056 r
= snprintf(buf
, sizeof(buf
), "%ld\n",
6057 *ptr
== (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr
));
6058 if (r
> sizeof(buf
))
6060 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
6064 tracing_nsecs_write(unsigned long *ptr
, const char __user
*ubuf
,
6065 size_t cnt
, loff_t
*ppos
)
6070 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
6080 tracing_thresh_read(struct file
*filp
, char __user
*ubuf
,
6081 size_t cnt
, loff_t
*ppos
)
6083 return tracing_nsecs_read(&tracing_thresh
, ubuf
, cnt
, ppos
);
6087 tracing_thresh_write(struct file
*filp
, const char __user
*ubuf
,
6088 size_t cnt
, loff_t
*ppos
)
6090 struct trace_array
*tr
= filp
->private_data
;
6093 mutex_lock(&trace_types_lock
);
6094 ret
= tracing_nsecs_write(&tracing_thresh
, ubuf
, cnt
, ppos
);
6098 if (tr
->current_trace
->update_thresh
) {
6099 ret
= tr
->current_trace
->update_thresh(tr
);
6106 mutex_unlock(&trace_types_lock
);
6111 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6114 tracing_max_lat_read(struct file
*filp
, char __user
*ubuf
,
6115 size_t cnt
, loff_t
*ppos
)
6117 return tracing_nsecs_read(filp
->private_data
, ubuf
, cnt
, ppos
);
6121 tracing_max_lat_write(struct file
*filp
, const char __user
*ubuf
,
6122 size_t cnt
, loff_t
*ppos
)
6124 return tracing_nsecs_write(filp
->private_data
, ubuf
, cnt
, ppos
);
6129 static int tracing_open_pipe(struct inode
*inode
, struct file
*filp
)
6131 struct trace_array
*tr
= inode
->i_private
;
6132 struct trace_iterator
*iter
;
6135 ret
= tracing_check_open_get_tr(tr
);
6139 mutex_lock(&trace_types_lock
);
6141 /* create a buffer to store the information to pass to userspace */
6142 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
6145 __trace_array_put(tr
);
6149 trace_seq_init(&iter
->seq
);
6150 iter
->trace
= tr
->current_trace
;
6152 if (!alloc_cpumask_var(&iter
->started
, GFP_KERNEL
)) {
6157 /* trace pipe does not show start of buffer */
6158 cpumask_setall(iter
->started
);
6160 if (tr
->trace_flags
& TRACE_ITER_LATENCY_FMT
)
6161 iter
->iter_flags
|= TRACE_FILE_LAT_FMT
;
6163 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6164 if (trace_clocks
[tr
->clock_id
].in_ns
)
6165 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
6168 iter
->array_buffer
= &tr
->array_buffer
;
6169 iter
->cpu_file
= tracing_get_cpu(inode
);
6170 mutex_init(&iter
->mutex
);
6171 filp
->private_data
= iter
;
6173 if (iter
->trace
->pipe_open
)
6174 iter
->trace
->pipe_open(iter
);
6176 nonseekable_open(inode
, filp
);
6180 mutex_unlock(&trace_types_lock
);
6185 __trace_array_put(tr
);
6186 mutex_unlock(&trace_types_lock
);
6190 static int tracing_release_pipe(struct inode
*inode
, struct file
*file
)
6192 struct trace_iterator
*iter
= file
->private_data
;
6193 struct trace_array
*tr
= inode
->i_private
;
6195 mutex_lock(&trace_types_lock
);
6199 if (iter
->trace
->pipe_close
)
6200 iter
->trace
->pipe_close(iter
);
6202 mutex_unlock(&trace_types_lock
);
6204 free_cpumask_var(iter
->started
);
6205 mutex_destroy(&iter
->mutex
);
6208 trace_array_put(tr
);
6214 trace_poll(struct trace_iterator
*iter
, struct file
*filp
, poll_table
*poll_table
)
6216 struct trace_array
*tr
= iter
->tr
;
6218 /* Iterators are static, they should be filled or empty */
6219 if (trace_buffer_iter(iter
, iter
->cpu_file
))
6220 return EPOLLIN
| EPOLLRDNORM
;
6222 if (tr
->trace_flags
& TRACE_ITER_BLOCK
)
6224 * Always select as readable when in blocking mode
6226 return EPOLLIN
| EPOLLRDNORM
;
6228 return ring_buffer_poll_wait(iter
->array_buffer
->buffer
, iter
->cpu_file
,
6233 tracing_poll_pipe(struct file
*filp
, poll_table
*poll_table
)
6235 struct trace_iterator
*iter
= filp
->private_data
;
6237 return trace_poll(iter
, filp
, poll_table
);
6240 /* Must be called with iter->mutex held. */
6241 static int tracing_wait_pipe(struct file
*filp
)
6243 struct trace_iterator
*iter
= filp
->private_data
;
6246 while (trace_empty(iter
)) {
6248 if ((filp
->f_flags
& O_NONBLOCK
)) {
6253 * We block until we read something and tracing is disabled.
6254 * We still block if tracing is disabled, but we have never
6255 * read anything. This allows a user to cat this file, and
6256 * then enable tracing. But after we have read something,
6257 * we give an EOF when tracing is again disabled.
6259 * iter->pos will be 0 if we haven't read anything.
6261 if (!tracer_tracing_is_on(iter
->tr
) && iter
->pos
)
6264 mutex_unlock(&iter
->mutex
);
6266 ret
= wait_on_pipe(iter
, 0);
6268 mutex_lock(&iter
->mutex
);
6281 tracing_read_pipe(struct file
*filp
, char __user
*ubuf
,
6282 size_t cnt
, loff_t
*ppos
)
6284 struct trace_iterator
*iter
= filp
->private_data
;
6288 * Avoid more than one consumer on a single file descriptor
6289 * This is just a matter of traces coherency, the ring buffer itself
6292 mutex_lock(&iter
->mutex
);
6294 /* return any leftover data */
6295 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
6299 trace_seq_init(&iter
->seq
);
6301 if (iter
->trace
->read
) {
6302 sret
= iter
->trace
->read(iter
, filp
, ubuf
, cnt
, ppos
);
6308 sret
= tracing_wait_pipe(filp
);
6312 /* stop when tracing is finished */
6313 if (trace_empty(iter
)) {
6318 if (cnt
>= PAGE_SIZE
)
6319 cnt
= PAGE_SIZE
- 1;
6321 /* reset all but tr, trace, and overruns */
6322 memset(&iter
->seq
, 0,
6323 sizeof(struct trace_iterator
) -
6324 offsetof(struct trace_iterator
, seq
));
6325 cpumask_clear(iter
->started
);
6326 trace_seq_init(&iter
->seq
);
6329 trace_event_read_lock();
6330 trace_access_lock(iter
->cpu_file
);
6331 while (trace_find_next_entry_inc(iter
) != NULL
) {
6332 enum print_line_t ret
;
6333 int save_len
= iter
->seq
.seq
.len
;
6335 ret
= print_trace_line(iter
);
6336 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
6337 /* don't print partial lines */
6338 iter
->seq
.seq
.len
= save_len
;
6341 if (ret
!= TRACE_TYPE_NO_CONSUME
)
6342 trace_consume(iter
);
6344 if (trace_seq_used(&iter
->seq
) >= cnt
)
6348 * Setting the full flag means we reached the trace_seq buffer
6349 * size and we should leave by partial output condition above.
6350 * One of the trace_seq_* functions is not used properly.
6352 WARN_ONCE(iter
->seq
.full
, "full flag set for trace type %d",
6355 trace_access_unlock(iter
->cpu_file
);
6356 trace_event_read_unlock();
6358 /* Now copy what we have to the user */
6359 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
6360 if (iter
->seq
.seq
.readpos
>= trace_seq_used(&iter
->seq
))
6361 trace_seq_init(&iter
->seq
);
6364 * If there was nothing to send to user, in spite of consuming trace
6365 * entries, go back to wait for more entries.
6371 mutex_unlock(&iter
->mutex
);
6376 static void tracing_spd_release_pipe(struct splice_pipe_desc
*spd
,
6379 __free_page(spd
->pages
[idx
]);
6383 tracing_fill_pipe_page(size_t rem
, struct trace_iterator
*iter
)
6389 /* Seq buffer is page-sized, exactly what we need. */
6391 save_len
= iter
->seq
.seq
.len
;
6392 ret
= print_trace_line(iter
);
6394 if (trace_seq_has_overflowed(&iter
->seq
)) {
6395 iter
->seq
.seq
.len
= save_len
;
6400 * This should not be hit, because it should only
6401 * be set if the iter->seq overflowed. But check it
6402 * anyway to be safe.
6404 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
6405 iter
->seq
.seq
.len
= save_len
;
6409 count
= trace_seq_used(&iter
->seq
) - save_len
;
6412 iter
->seq
.seq
.len
= save_len
;
6416 if (ret
!= TRACE_TYPE_NO_CONSUME
)
6417 trace_consume(iter
);
6419 if (!trace_find_next_entry_inc(iter
)) {
6429 static ssize_t
tracing_splice_read_pipe(struct file
*filp
,
6431 struct pipe_inode_info
*pipe
,
6435 struct page
*pages_def
[PIPE_DEF_BUFFERS
];
6436 struct partial_page partial_def
[PIPE_DEF_BUFFERS
];
6437 struct trace_iterator
*iter
= filp
->private_data
;
6438 struct splice_pipe_desc spd
= {
6440 .partial
= partial_def
,
6441 .nr_pages
= 0, /* This gets updated below. */
6442 .nr_pages_max
= PIPE_DEF_BUFFERS
,
6443 .ops
= &default_pipe_buf_ops
,
6444 .spd_release
= tracing_spd_release_pipe
,
6450 if (splice_grow_spd(pipe
, &spd
))
6453 mutex_lock(&iter
->mutex
);
6455 if (iter
->trace
->splice_read
) {
6456 ret
= iter
->trace
->splice_read(iter
, filp
,
6457 ppos
, pipe
, len
, flags
);
6462 ret
= tracing_wait_pipe(filp
);
6466 if (!iter
->ent
&& !trace_find_next_entry_inc(iter
)) {
6471 trace_event_read_lock();
6472 trace_access_lock(iter
->cpu_file
);
6474 /* Fill as many pages as possible. */
6475 for (i
= 0, rem
= len
; i
< spd
.nr_pages_max
&& rem
; i
++) {
6476 spd
.pages
[i
] = alloc_page(GFP_KERNEL
);
6480 rem
= tracing_fill_pipe_page(rem
, iter
);
6482 /* Copy the data into the page, so we can start over. */
6483 ret
= trace_seq_to_buffer(&iter
->seq
,
6484 page_address(spd
.pages
[i
]),
6485 trace_seq_used(&iter
->seq
));
6487 __free_page(spd
.pages
[i
]);
6490 spd
.partial
[i
].offset
= 0;
6491 spd
.partial
[i
].len
= trace_seq_used(&iter
->seq
);
6493 trace_seq_init(&iter
->seq
);
6496 trace_access_unlock(iter
->cpu_file
);
6497 trace_event_read_unlock();
6498 mutex_unlock(&iter
->mutex
);
6503 ret
= splice_to_pipe(pipe
, &spd
);
6507 splice_shrink_spd(&spd
);
6511 mutex_unlock(&iter
->mutex
);
6516 tracing_entries_read(struct file
*filp
, char __user
*ubuf
,
6517 size_t cnt
, loff_t
*ppos
)
6519 struct inode
*inode
= file_inode(filp
);
6520 struct trace_array
*tr
= inode
->i_private
;
6521 int cpu
= tracing_get_cpu(inode
);
6526 mutex_lock(&trace_types_lock
);
6528 if (cpu
== RING_BUFFER_ALL_CPUS
) {
6529 int cpu
, buf_size_same
;
6534 /* check if all cpu sizes are same */
6535 for_each_tracing_cpu(cpu
) {
6536 /* fill in the size from first enabled cpu */
6538 size
= per_cpu_ptr(tr
->array_buffer
.data
, cpu
)->entries
;
6539 if (size
!= per_cpu_ptr(tr
->array_buffer
.data
, cpu
)->entries
) {
6545 if (buf_size_same
) {
6546 if (!ring_buffer_expanded
)
6547 r
= sprintf(buf
, "%lu (expanded: %lu)\n",
6549 trace_buf_size
>> 10);
6551 r
= sprintf(buf
, "%lu\n", size
>> 10);
6553 r
= sprintf(buf
, "X\n");
6555 r
= sprintf(buf
, "%lu\n", per_cpu_ptr(tr
->array_buffer
.data
, cpu
)->entries
>> 10);
6557 mutex_unlock(&trace_types_lock
);
6559 ret
= simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
6564 tracing_entries_write(struct file
*filp
, const char __user
*ubuf
,
6565 size_t cnt
, loff_t
*ppos
)
6567 struct inode
*inode
= file_inode(filp
);
6568 struct trace_array
*tr
= inode
->i_private
;
6572 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
6576 /* must have at least 1 entry */
6580 /* value is in KB */
6582 ret
= tracing_resize_ring_buffer(tr
, val
, tracing_get_cpu(inode
));
6592 tracing_total_entries_read(struct file
*filp
, char __user
*ubuf
,
6593 size_t cnt
, loff_t
*ppos
)
6595 struct trace_array
*tr
= filp
->private_data
;
6598 unsigned long size
= 0, expanded_size
= 0;
6600 mutex_lock(&trace_types_lock
);
6601 for_each_tracing_cpu(cpu
) {
6602 size
+= per_cpu_ptr(tr
->array_buffer
.data
, cpu
)->entries
>> 10;
6603 if (!ring_buffer_expanded
)
6604 expanded_size
+= trace_buf_size
>> 10;
6606 if (ring_buffer_expanded
)
6607 r
= sprintf(buf
, "%lu\n", size
);
6609 r
= sprintf(buf
, "%lu (expanded: %lu)\n", size
, expanded_size
);
6610 mutex_unlock(&trace_types_lock
);
6612 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
6616 tracing_free_buffer_write(struct file
*filp
, const char __user
*ubuf
,
6617 size_t cnt
, loff_t
*ppos
)
6620 * There is no need to read what the user has written, this function
6621 * is just to make sure that there is no error when "echo" is used
6630 tracing_free_buffer_release(struct inode
*inode
, struct file
*filp
)
6632 struct trace_array
*tr
= inode
->i_private
;
6634 /* disable tracing ? */
6635 if (tr
->trace_flags
& TRACE_ITER_STOP_ON_FREE
)
6636 tracer_tracing_off(tr
);
6637 /* resize the ring buffer to 0 */
6638 tracing_resize_ring_buffer(tr
, 0, RING_BUFFER_ALL_CPUS
);
6640 trace_array_put(tr
);
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	enum event_trigger_type tt = ETT_NONE;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

/* Used in tracing_mark_raw_write() as well */
#define FAULTED_STR "<faulted>"
#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */

	/* If less than "<faulted>", then make sure we can still add that */
	if (cnt < FAULTED_SIZE)
		size += FAULTED_SIZE - cnt;

	buffer = tr->array_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    irq_flags, preempt_count());
	if (unlikely(!event))
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
	if (len) {
		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
		cnt = FAULTED_SIZE;
		written = -EFAULT;
	} else
		written = cnt;

	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
		/* do not add \n before testing triggers, but add \0 */
		entry->buf[cnt] = '\0';
		tt = event_triggers_call(tr->trace_marker_file, entry, event);
	}

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	if (static_branch_unlikely(&trace_marker_exports_enabled))
		ftrace_exports(event, TRACE_EXPORT_MARKER);
	__buffer_unlock_commit(buffer, event);

	if (tt)
		event_triggers_post_call(tr->trace_marker_file, tt);

	return written;
}
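/*
 * This is the handler for user space writes to trace_marker, e.g.
 * (illustrative):
 *
 *	# echo "hello world" > trace_marker
 *
 * The text is copied straight into a TRACE_PRINT event; if the copy
 * from user space faults, the "<faulted>" placeholder is logged
 * instead so the write is never silently dropped.
 */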
6724 /* Limit it for now to 3K (including tag) */
6725 #define RAW_DATA_MAX_SIZE (1024*3)
6728 tracing_mark_raw_write(struct file
*filp
, const char __user
*ubuf
,
6729 size_t cnt
, loff_t
*fpos
)
6731 struct trace_array
*tr
= filp
->private_data
;
6732 struct ring_buffer_event
*event
;
6733 struct trace_buffer
*buffer
;
6734 struct raw_data_entry
*entry
;
6735 unsigned long irq_flags
;
6740 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6742 if (tracing_disabled
)
6745 if (!(tr
->trace_flags
& TRACE_ITER_MARKERS
))
6748 /* The marker must at least have a tag id */
6749 if (cnt
< sizeof(unsigned int) || cnt
> RAW_DATA_MAX_SIZE
)
6752 if (cnt
> TRACE_BUF_SIZE
)
6753 cnt
= TRACE_BUF_SIZE
;
6755 BUILD_BUG_ON(TRACE_BUF_SIZE
>= PAGE_SIZE
);
6757 local_save_flags(irq_flags
);
6758 size
= sizeof(*entry
) + cnt
;
6759 if (cnt
< FAULT_SIZE_ID
)
6760 size
+= FAULT_SIZE_ID
- cnt
;
6762 buffer
= tr
->array_buffer
.buffer
;
6763 event
= __trace_buffer_lock_reserve(buffer
, TRACE_RAW_DATA
, size
,
6764 irq_flags
, preempt_count());
6766 /* Ring buffer disabled, return as if not open for write */
6769 entry
= ring_buffer_event_data(event
);
6771 len
= __copy_from_user_inatomic(&entry
->id
, ubuf
, cnt
);
6774 memcpy(&entry
->buf
, FAULTED_STR
, FAULTED_SIZE
);
6779 __buffer_unlock_commit(buffer
, event
);
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
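/*
 * tracing_set_clock() backs the trace_clock file, e.g. (illustrative):
 *
 *	# echo global > trace_clock
 *
 * Both the main buffer and, if allocated, the max/snapshot buffer
 * switch clocks, and both are reset because timestamps taken from
 * different clocks cannot be meaningfully compared.
 */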
6836 static ssize_t
tracing_clock_write(struct file
*filp
, const char __user
*ubuf
,
6837 size_t cnt
, loff_t
*fpos
)
6839 struct seq_file
*m
= filp
->private_data
;
6840 struct trace_array
*tr
= m
->private;
6842 const char *clockstr
;
6845 if (cnt
>= sizeof(buf
))
6848 if (copy_from_user(buf
, ubuf
, cnt
))
6853 clockstr
= strstrip(buf
);
6855 ret
= tracing_set_clock(tr
, clockstr
);
6864 static int tracing_clock_open(struct inode
*inode
, struct file
*file
)
6866 struct trace_array
*tr
= inode
->i_private
;
6869 ret
= tracing_check_open_get_tr(tr
);
6873 ret
= single_open(file
, tracing_clock_show
, inode
->i_private
);
6875 trace_array_put(tr
);
6880 static int tracing_time_stamp_mode_show(struct seq_file
*m
, void *v
)
6882 struct trace_array
*tr
= m
->private;
6884 mutex_lock(&trace_types_lock
);
6886 if (ring_buffer_time_stamp_abs(tr
->array_buffer
.buffer
))
6887 seq_puts(m
, "delta [absolute]\n");
6889 seq_puts(m
, "[delta] absolute\n");
6891 mutex_unlock(&trace_types_lock
);
6896 static int tracing_time_stamp_mode_open(struct inode
*inode
, struct file
*file
)
6898 struct trace_array
*tr
= inode
->i_private
;
6901 ret
= tracing_check_open_get_tr(tr
);
6905 ret
= single_open(file
, tracing_time_stamp_mode_show
, inode
->i_private
);
6907 trace_array_put(tr
);
6912 int tracing_set_time_stamp_abs(struct trace_array
*tr
, bool abs
)
6916 mutex_lock(&trace_types_lock
);
6918 if (abs
&& tr
->time_stamp_abs_ref
++)
6922 if (WARN_ON_ONCE(!tr
->time_stamp_abs_ref
)) {
6927 if (--tr
->time_stamp_abs_ref
)
6931 ring_buffer_set_time_stamp_abs(tr
->array_buffer
.buffer
, abs
);
6933 #ifdef CONFIG_TRACER_MAX_TRACE
6934 if (tr
->max_buffer
.buffer
)
6935 ring_buffer_set_time_stamp_abs(tr
->max_buffer
.buffer
, abs
);
6938 mutex_unlock(&trace_types_lock
);
6943 struct ftrace_buffer_info
{
6944 struct trace_iterator iter
;
6946 unsigned int spare_cpu
;
6950 #ifdef CONFIG_TRACER_SNAPSHOT
6951 static int tracing_snapshot_open(struct inode
*inode
, struct file
*file
)
6953 struct trace_array
*tr
= inode
->i_private
;
6954 struct trace_iterator
*iter
;
6958 ret
= tracing_check_open_get_tr(tr
);
6962 if (file
->f_mode
& FMODE_READ
) {
6963 iter
= __tracing_open(inode
, file
, true);
6965 ret
= PTR_ERR(iter
);
6967 /* Writes still need the seq_file to hold the private data */
6969 m
= kzalloc(sizeof(*m
), GFP_KERNEL
);
6972 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
6980 iter
->array_buffer
= &tr
->max_buffer
;
6981 iter
->cpu_file
= tracing_get_cpu(inode
);
6983 file
->private_data
= m
;
6987 trace_array_put(tr
);
6993 tracing_snapshot_write(struct file
*filp
, const char __user
*ubuf
, size_t cnt
,
6996 struct seq_file
*m
= filp
->private_data
;
6997 struct trace_iterator
*iter
= m
->private;
6998 struct trace_array
*tr
= iter
->tr
;
7002 ret
= tracing_update_buffers();
7006 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
7010 mutex_lock(&trace_types_lock
);
7012 if (tr
->current_trace
->use_max_tr
) {
7017 arch_spin_lock(&tr
->max_lock
);
7018 if (tr
->cond_snapshot
)
7020 arch_spin_unlock(&tr
->max_lock
);
7026 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
7030 if (tr
->allocated_snapshot
)
7034 /* Only allow per-cpu swap if the ring buffer supports it */
7035 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7036 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
7041 if (tr
->allocated_snapshot
)
7042 ret
= resize_buffer_duplicate_size(&tr
->max_buffer
,
7043 &tr
->array_buffer
, iter
->cpu_file
);
7045 ret
= tracing_alloc_snapshot_instance(tr
);
7048 local_irq_disable();
7049 /* Now, we're going to swap */
7050 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
7051 update_max_tr(tr
, current
, smp_processor_id(), NULL
);
7053 update_max_tr_single(tr
, current
, iter
->cpu_file
);
7057 if (tr
->allocated_snapshot
) {
7058 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
7059 tracing_reset_online_cpus(&tr
->max_buffer
);
7061 tracing_reset_cpu(&tr
->max_buffer
, iter
->cpu_file
);
7071 mutex_unlock(&trace_types_lock
);
7075 static int tracing_snapshot_release(struct inode
*inode
, struct file
*file
)
7077 struct seq_file
*m
= file
->private_data
;
7080 ret
= tracing_release(inode
, file
);
7082 if (file
->f_mode
& FMODE_READ
)
7085 /* If write only, the seq_file is just a stub */
7093 static int tracing_buffers_open(struct inode
*inode
, struct file
*filp
);
7094 static ssize_t
tracing_buffers_read(struct file
*filp
, char __user
*ubuf
,
7095 size_t count
, loff_t
*ppos
);
7096 static int tracing_buffers_release(struct inode
*inode
, struct file
*file
);
7097 static ssize_t
tracing_buffers_splice_read(struct file
*file
, loff_t
*ppos
,
7098 struct pipe_inode_info
*pipe
, size_t len
, unsigned int flags
);
7100 static int snapshot_raw_open(struct inode
*inode
, struct file
*filp
)
7102 struct ftrace_buffer_info
*info
;
7105 /* The following checks for tracefs lockdown */
7106 ret
= tracing_buffers_open(inode
, filp
);
7110 info
= filp
->private_data
;
7112 if (info
->iter
.trace
->use_max_tr
) {
7113 tracing_buffers_release(inode
, filp
);
7117 info
->iter
.snapshot
= true;
7118 info
->iter
.array_buffer
= &info
->iter
.tr
->max_buffer
;
7123 #endif /* CONFIG_TRACER_SNAPSHOT */
static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_mark_raw_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_raw_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

static const struct file_operations trace_time_stamp_mode_fops = {
	.open		= tracing_time_stamp_mode_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */
#define TRACING_LOG_ERRS_MAX	8
#define TRACING_LOG_LOC_MAX	128

#define CMD_PREFIX " Command: "

struct err_info {
	const char	**errs;	/* ptr to loc-specific array of err strings */
	u8		type;	/* index into errs -> specific err string */
	u8		pos;	/* MAX_FILTER_STR_VAL = 256 */
	u64		ts;
};

struct tracing_log_err {
	struct list_head	list;
	struct err_info		info;
	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
	char			cmd[MAX_FILTER_STR_VAL];  /* what caused err */
};

static DEFINE_MUTEX(tracing_err_log_lock);

static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
{
	struct tracing_log_err *err;

	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
		err = kzalloc(sizeof(*err), GFP_KERNEL);
		if (!err)
			err = ERR_PTR(-ENOMEM);
		else
			tr->n_err_log_entries++;

		return err;
	}

	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
	list_del(&err->list);

	return err;
}
/**
 * err_pos - find the position of a string within a command for error careting
 * @cmd: The tracing command that caused the error
 * @str: The string to position the caret at within @cmd
 *
 * Finds the position of the first occurrence of @str within @cmd.  The
 * return value can be passed to tracing_log_err() for caret placement
 * within @cmd.
 *
 * Returns the index within @cmd of the first occurrence of @str or 0
 * if @str was not found.
 */
unsigned int err_pos(char *cmd, const char *str)
{
	char *found;

	if (WARN_ON(!strlen(cmd)))
		return 0;

	found = strstr(cmd, str);
	if (found)
		return found - cmd;

	return 0;
}
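/*
 * Illustrative note (not part of the original file): a caller that rejects,
 * say, the token "bogus_field" in a command string would typically log the
 * error with something like
 *
 *	tracing_log_err(tr, "some location", cmd, err_strings,
 *			SOME_ERR_IDX, err_pos(cmd, "bogus_field"));
 *
 * so the caret in tracing/error_log lines up under the offending token.
 * "some location", err_strings and SOME_ERR_IDX are made-up placeholders
 * here, not real callers.
 */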
/**
 * tracing_log_err - write an error to the tracing error log
 * @tr: The associated trace array for the error (NULL for top level array)
 * @loc: A string describing where the error occurred
 * @cmd: The tracing command that caused the error
 * @errs: The array of loc-specific static error strings
 * @type: The index into errs[], which produces the specific static err string
 * @pos: The position the caret should be placed in the cmd
 *
 * Writes an error into tracing/error_log of the form:
 *
 * <loc>: error: <text>
 *
 * tracing/error_log is a small log file containing the last
 * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
 * unless there has been a tracing error, and the error log can be
 * cleared and have its memory freed by writing the empty string in
 * truncation mode to it i.e. echo > tracing/error_log.
 *
 * NOTE: the @errs array along with the @type param are used to
 * produce a static error string - this string is not copied and saved
 * when the error is logged - only a pointer to it is saved.  See
 * existing callers for examples of how static strings are typically
 * defined for use with tracing_log_err().
 */
void tracing_log_err(struct trace_array *tr,
		     const char *loc, const char *cmd,
		     const char **errs, u8 type, u8 pos)
{
	struct tracing_log_err *err;

	if (!tr)
		tr = &global_trace;

	mutex_lock(&tracing_err_log_lock);
	err = get_tracing_log_err(tr);
	if (PTR_ERR(err) == -ENOMEM) {
		mutex_unlock(&tracing_err_log_lock);
		return;
	}

	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);

	err->info.errs = errs;
	err->info.type = type;
	err->info.pos = pos;
	err->info.ts = local_clock();

	list_add_tail(&err->list, &tr->err_log);
	mutex_unlock(&tracing_err_log_lock);
}
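/*
 * Example of the resulting error_log output (illustrative only; the exact
 * location, command and error text depend on the caller):
 *
 *	[  123.456789] some location: error: Couldn't find field
 *	 Command: the command that was written
 *	               ^
 */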
static void clear_tracing_err_log(struct trace_array *tr)
{
	struct tracing_log_err *err, *next;

	mutex_lock(&tracing_err_log_lock);
	list_for_each_entry_safe(err, next, &tr->err_log, list) {
		list_del(&err->list);
		kfree(err);
	}

	tr->n_err_log_entries = 0;
	mutex_unlock(&tracing_err_log_lock);
}
static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;

	mutex_lock(&tracing_err_log_lock);

	return seq_list_start(&tr->err_log, *pos);
}

static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;

	return seq_list_next(v, &tr->err_log, pos);
}

static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&tracing_err_log_lock);
}

static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
{
	u8 i;

	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
		seq_putc(m, ' ');
	for (i = 0; i < pos; i++)
		seq_putc(m, ' ');
	seq_puts(m, "^\n");
}

static int tracing_err_log_seq_show(struct seq_file *m, void *v)
{
	struct tracing_log_err *err = v;

	if (err) {
		const char *err_text = err->info.errs[err->info.type];
		u64 sec = err->info.ts;
		u32 nsec;

		nsec = do_div(sec, NSEC_PER_SEC);
		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
			   err->loc, err_text);
		seq_printf(m, "%s", err->cmd);
		tracing_err_log_show_pos(m, err->info.pos);
	}

	return 0;
}
static const struct seq_operations tracing_err_log_seq_ops = {
	.start  = tracing_err_log_seq_start,
	.next   = tracing_err_log_seq_next,
	.stop   = tracing_err_log_seq_stop,
	.show   = tracing_err_log_seq_show
};

static int tracing_err_log_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret = 0;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	/* If this file was opened for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
		clear_tracing_err_log(tr);

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &tracing_err_log_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = tr;
		} else {
			trace_array_put(tr);
		}
	}
	return ret;
}
static ssize_t tracing_err_log_write(struct file *file,
				     const char __user *buffer,
				     size_t count, loff_t *ppos)
{
	return count;
}

static int tracing_err_log_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	return 0;
}

static const struct file_operations tracing_err_log_fops = {
	.open		= tracing_err_log_open,
	.write		= tracing_err_log_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_err_log_release,
};
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	info = kvzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr		= tr;
	info->iter.cpu_file	= tracing_get_cpu(inode);
	info->iter.trace	= tr->current_trace;
	info->iter.array_buffer = &tr->array_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	tr->trace_ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static __poll_t
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret = 0;
	ssize_t size;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare) {
		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
							  iter->cpu_file);
		if (IS_ERR(info->spare)) {
			ret = PTR_ERR(info->spare);
			info->spare = NULL;
		} else {
			info->spare_cpu = iter->cpu_file;
		}
	}
	if (!info->spare)
		return ret;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->array_buffer->buffer,
				    &info->spare, count, iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, 0);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->trace_ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->array_buffer->buffer,
					   info->spare_cpu, info->spare);
	kvfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}
struct buffer_ref {
	struct trace_buffer	*buffer;
	void			*page;
	int			cpu;
	refcount_t		refcount;
};

static void buffer_ref_release(struct buffer_ref *ref)
{
	if (!refcount_dec_and_test(&ref->refcount))
		return;
	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
	kfree(ref);
}

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	buffer_ref_release(ref);
	buf->private = 0;
}

static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (refcount_read(&ref->refcount) > INT_MAX/2)
		return false;

	refcount_inc(&ref->refcount);
	return true;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.release		= buffer_pipe_buf_release,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	buffer_ref_release(ref);
	spd->partial[i].private = 0;
}
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		refcount_set(&ref->refcount, 1);
		ref->buffer = iter->array_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (IS_ERR(ref->page)) {
			ret = PTR_ERR(ref->page);
			ref->page = NULL;
			kfree(ref);
			break;
		}
		ref->cpu = iter->cpu_file;

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->cpu,
						   ref->page);
			kfree(ref);
			break;
		}

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct array_buffer *trace_buf = &tr->array_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	ssize_t ret;
	char *buf;
	int r;

	/* 256 should be plenty to hold the amount needed */
	buf = kmalloc(256, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
		      ftrace_update_tot_cnt,
		      ftrace_number_of_pages,
		      ftrace_number_of_groups);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	kfree(buf);
	return ret;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	tracing_snapshot_instance(tr);
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return;
		(*count)--;
	}

	tracing_snapshot_instance(tr);
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		if (!mapper)
			return;
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
	.init			= ftrace_snapshot_init,
	.free			= ftrace_snapshot_free,
};
static int
ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = tracing_alloc_snapshot_instance(tr);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, tr, ops, count);

 out:
	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
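/*
 * Usage sketch for the command registered above (documented ftrace
 * behaviour): writing "<function>:snapshot" or "<function>:snapshot:<count>"
 * to set_ftrace_filter arranges for tracing_snapshot_instance() to be called
 * (at most <count> times) whenever <function> is hit, e.g.:
 *
 *	echo 'do_sys_open:snapshot:3' > set_ftrace_filter
 */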
static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	MEM_FAIL(!tr->percpu_dir,
		 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}
static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif
static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};
/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *   idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 *   ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}
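/*
 * Worked example (illustrative): if the option file for flag 5 was created
 * with data == &tr->trace_flags_index[5], then trace_flags_index[5] == 5,
 * so *pindex == 5 and (data - 5) == &tr->trace_flags_index[0], from which
 * container_of() recovers the enclosing trace_array.
 */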
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}
static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);

}

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there's no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		MEM_FAIL(topts[cnt].entry == NULL,
			 "Failed to create trace option: %s",
			 opts[cnt].name);
	}
}
static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (!!val == tracer_tracing_is_on(tr)) {
			val = 0; /* do nothing */
		} else if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
static ssize_t
buffer_percent_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tr->buffer_percent;
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
buffer_percent_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val > 100)
		return -EINVAL;

	tr->buffer_percent = val;

	(*ppos)++;

	return cnt;
}

static const struct file_operations buffer_percent_fops = {
	.open		= tracing_open_generic_tr,
	.read		= buffer_percent_read,
	.write		= buffer_percent_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
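/*
 * Note (summarizing documented behaviour, added here for orientation):
 * buffer_percent is the fill level a per-cpu ring buffer must reach before
 * readers blocked in poll() or splice() on trace_pipe_raw are woken; see the
 * wait_on_pipe(iter, iter->tr->buffer_percent) call in
 * tracing_buffers_splice_read() above. Zero means wake up as soon as any
 * data is available.
 */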
static struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->array_buffer,
			   ring_buffer_size(tr->array_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
		ring_buffer_free(tr->array_buffer.buffer);
		tr->array_buffer.buffer = NULL;
		free_percpu(tr->array_buffer.data);
		tr->array_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif

	return 0;
}

static void free_trace_buffer(struct array_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}
static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}
/* Must have trace_types_lock held */
struct trace_array *trace_array_find(const char *instance)
{
	struct trace_array *tr, *found = NULL;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, instance) == 0) {
			found = tr;
			break;
		}
	}

	return found;
}

struct trace_array *trace_array_find_get(const char *instance)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	tr = trace_array_find(instance);
	if (tr)
		tr->ref++;
	mutex_unlock(&trace_types_lock);

	return tr;
}

static int trace_array_create_dir(struct trace_array *tr)
{
	int ret;

	tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
	if (!tr->dir)
		return -EINVAL;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove(tr->dir);
		return ret;
	}

	init_tracer_tracefs(tr, tr->dir);
	__update_tracer_options(tr);

	return ret;
}
static struct trace_array *trace_array_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return ERR_PTR(ret);

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);
	INIT_LIST_HEAD(&tr->hist_vars);
	INIT_LIST_HEAD(&tr->err_log);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	if (ftrace_allocate_ftrace_ops(tr) < 0)
		goto out_free_tr;

	ftrace_init_trace_array(tr);

	init_trace_flags_index(tr);

	if (trace_instance_dir) {
		ret = trace_array_create_dir(tr);
		if (ret)
			goto out_free_tr;
	} else
		__trace_early_add_events(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	tr->ref++;

	return tr;

 out_free_tr:
	ftrace_free_ftrace_ops(tr);
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	return ERR_PTR(ret);
}

static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	if (trace_array_find(name))
		goto out_unlock;

	tr = trace_array_create(name);

	ret = PTR_ERR_OR_ZERO(tr);

out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);
	return ret;
}
/**
 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
 * @name: The name of the trace array to be looked up/created.
 *
 * Returns pointer to trace array with given name.
 * NULL, if it cannot be created.
 *
 * NOTE: This function increments the reference counter associated with the
 * trace array returned. This makes sure it cannot be freed while in use.
 * Use trace_array_put() once the trace array is no longer needed.
 * If the trace_array is to be freed, trace_array_destroy() needs to
 * be called after the trace_array_put(), or simply let user space delete
 * it from the tracefs instances directory. But until the
 * trace_array_put() is called, user space can not delete it.
 *
 */
struct trace_array *trace_array_get_by_name(const char *name)
{
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	tr = trace_array_create(name);

	if (IS_ERR(tr))
		tr = NULL;
out_unlock:
	if (tr)
		tr->ref++;

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);
	return tr;
}
EXPORT_SYMBOL_GPL(trace_array_get_by_name);
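/*
 * Illustrative in-kernel usage sketch (hypothetical module code, not part of
 * this file): a driver can create or look up its own instance and must pair
 * the lookup with trace_array_put(), and optionally destroy the instance it
 * owns:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENOMEM;
 *	...
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);
 */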
static int __remove_instance(struct trace_array *tr)
{
	int i;

	/* Reference counter for a newly created trace array = 1. */
	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
		return -EBUSY;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	clear_ftrace_function_probes(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	return 0;
}

int trace_array_destroy(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret;

	if (!this_tr)
		return -EINVAL;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -ENODEV;

	/* Making sure trace array exists before destroying it. */
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			ret = __remove_instance(tr);
			break;
		}
	}

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_array_destroy);

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	tr = trace_array_find(name);
	if (tr)
		ret = __remove_instance(tr);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}
static __init void create_trace_instances(struct dentry *d_tracer)
{
	struct trace_array *tr;

	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
		return;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->name)
			continue;
		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
			     "Failed to create instance directory\n"))
			break;
	}

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);
}
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	struct trace_event_file *file;
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	file = __find_event_file(tr, "ftrace", "print");
	if (file && file->dir)
		trace_create_file("trigger", 0644, file->dir, file,
				  &event_trigger_fops);
	tr->trace_marker_file = file;

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	trace_create_file("timestamp_mode", 0444, d_tracer, tr,
			  &trace_time_stamp_mode_fops);

	tr->buffer_percent = 50;

	trace_create_file("buffer_percent", 0444, d_tracer,
			tr, &buffer_percent_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_maxlat_file(tr, d_tracer);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		MEM_FAIL(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	trace_create_file("error_log", 0644, d_tracer,
			  tr, &tracing_err_log_fops);

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}
static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
int tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return 0;

	if (WARN_ON(!tracefs_initialized()))
		return -ENODEV;

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);

	return 0;
}
extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static struct workqueue_struct *eval_map_wq __initdata;
static struct work_struct eval_map_work __initdata;

static void __init eval_map_work_func(struct work_struct *work)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

static int __init trace_eval_init(void)
{
	INIT_WORK(&eval_map_work, eval_map_work_func);

	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
	if (!eval_map_wq) {
		pr_err("Unable to allocate eval_map_wq\n");
		/* Do work here */
		eval_map_work_func(&eval_map_work);
		return -ENOMEM;
	}

	queue_work(eval_map_wq, &eval_map_work);
	return 0;
}

static int __init trace_eval_sync(void)
{
	/* Make sure the eval map updates are finished */
	if (eval_map_wq)
		destroy_workqueue(eval_map_wq);
	return 0;
}

late_initcall_sync(trace_eval_sync);
#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */
static __init int tracer_init_tracefs(void)
{
	int ret;

	trace_access_lock_init();

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	init_tracer_tracefs(&global_trace, NULL);
	ftrace_init_tracefs_toplevel(&global_trace, NULL);

	trace_create_file("tracing_thresh", 0644, NULL,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, NULL,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, NULL,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, NULL,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", 0444, NULL,
			NULL, &tracing_saved_tgids_fops);

	trace_create_eval_file(NULL);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, NULL,
			NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(NULL);

	update_tracer_options(&global_trace);

	return 0;
}
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &global_trace.array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);
	printk_nmi_direct_enter();

	/* Simulate the iterator */
	trace_init_global_iter(&iter);
	/* Can not use kmalloc for iter.temp */
	iter.temp = static_temp_buf;
	iter.temp_size = STATIC_TEMP_BUF_SIZE;

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	printk_nmi_direct_exit();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
int trace_run_command(const char *buf, int (*createfn)(int, char **))
{
	char **argv;
	int argc, ret;

	argc = 0;
	ret = 0;
	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = createfn(argc, argv);

	argv_free(argv);

	return ret;
}

#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(int, char **))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = trace_run_command(buf, createfn);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
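/*
 * Note (assumption about callers, for orientation): this helper is the
 * generic "one command per line, '#' starts a comment" write-path parser;
 * dynamic event files such as kprobe_events feed their write() data through
 * it, with @createfn doing the per-command work on the argv array produced
 * by trace_run_command().
 */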
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;


	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static int tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return -EPERM;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}

	return 0;
}
late_initcall_sync(tracing_set_default_clock);
#endif