// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the entries
 * inserted during the selftest, although concurrent insertions into
 * the ring-buffer, such as trace_printk, could occur at the same
 * time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set it to 1 to dump the buffers of all CPUs
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module *mod;
	unsigned long length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item *next;
	const char *end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

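/*
 * Worked example: the "+ 500" above makes the divide round to the
 * nearest microsecond, so ns2usecs(1499) == 1 while ns2usecs(1500) == 2.
 */
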
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

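/*
 * Illustrative usage sketch (hypothetical caller): pin the trace_array
 * while working with it so it cannot be freed underneath us:
 *
 *	if (trace_array_get(tr) == 0) {
 *		do_something_with(tr);	(hypothetical helper)
 *		trace_array_put(tr);
 *	}
 */
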
int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

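/*
 * Illustrative sketch of how the three helpers above slot into a
 * seq_operations table ("my_pid_list" is a hypothetical instance):
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static void p_stop(struct seq_file *m, void *v)
 *	{
 *	}
 *
 *	static const struct seq_operations pid_seq_ops = {
 *		.start	= p_start,
 *		.next	= p_next,
 *		.stop	= p_stop,
 *		.show	= trace_pid_show,
 *	};
 */
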
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list)
		return -ENOMEM;

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

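/*
 * Illustrative sketch of a write handler built on trace_pid_write()
 * ("tr->my_pid_list" is a hypothetical filtered-pid list):
 *
 *	ret = trace_pid_write(rcu_dereference(tr->my_pid_list),
 *			      &new_list, ubuf, cnt);
 *	if (ret < 0)
 *		return ret;
 *	rcu_assign_pointer(tr->my_pid_list, new_list);
 *	(free the old list with trace_free_pid_list() after a grace period)
 */
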
static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multi process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

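/*
 * Illustrative sketch: a consumer of one cpu's buffer brackets the read
 * with the primitives above; passing RING_BUFFER_ALL_CPUS instead takes
 * the write side and excludes all per-cpu readers:
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_peek(buffer, cpu, &ts, NULL);
 *	...
 *	trace_access_unlock(cpu);
 */
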
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, flags, pc);
	ent->type = type;
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

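/*
 * Usage note: callers normally reach __trace_puts()/__trace_bputs()
 * through the trace_puts() macro, e.g.
 *
 *	trace_puts("reached the slow path\n");
 *
 * which, for compile-time constant strings, can record just the string's
 * address (the bputs variant below) instead of copying the text.
 */
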
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

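/*
 * Illustrative sketch: a debugging patch would typically allocate the
 * snapshot buffer once from a sleepable context, then snapshot when a
 * (hypothetical) condition fires:
 *
 *	tracing_alloc_snapshot();
 *	...
 *	if (looks_wedged)
 *		tracing_snapshot();	(safe from atomic context)
 */
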
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

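/*
 * Usage note: tracing_off() is handy right where a bug is detected, so
 * the events leading up to it are not overwritten:
 *
 *	if (data_looks_corrupted)	(hypothetical check)
 *		tracing_off();
 *
 * The buffer can then be read at leisure and re-enabled with tracing_on().
 */
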
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};

bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

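/*
 * Illustrative sketch of the canonical parser loop (trace_pid_write()
 * above is a real instance; MAX_TOKEN and handle_token() are
 * hypothetical):
 *
 *	if (trace_parser_get_init(&parser, MAX_TOKEN + 1))
 *		return -ENOMEM;
 *	while (cnt > 0) {
 *		pos = 0;
 *		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *		if (ret < 0 || !trace_parser_loaded(&parser))
 *			break;
 *		ubuf += ret;
 *		cnt -= ret;
 *		handle_token(parser.buffer);
 *		trace_parser_clear(&parser);
 *	}
 *	trace_parser_put(&parser);
 */
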
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from trace_buffer */
	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

	swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, int full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head		list;
	struct tracer			*type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}

static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
	 * initialized and such), then do not run its selftests yet.
	 * Instead, run it a little later in the boot process.
	 */
	if (!selftests_can_run)
		return save_selftest(type);

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}

static __init int init_trace_selftests(void)
{
	struct trace_selftests *p, *n;
	struct tracer *t, **last;
	int ret;

	selftests_can_run = true;

	mutex_lock(&trace_types_lock);

	if (list_empty(&postponed_selftests))
		goto out;

	pr_info("Running postponed tracer tests:\n");

	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		ret = run_tracer_selftest(p->type);
		/* If the test fails, then warn and remove from available_tracers */
		if (ret < 0) {
			WARN(1, "tracer: %s failed selftest, disabling\n",
			     p->type->name);
			last = &trace_types;
			for (t = trace_types; t; t = t->next) {
				if (t == p->type) {
					*last = t->next;
					break;
				}
				last = &t->next;
			}
		}
		list_del(&p->list);
		kfree(p);
	}

 out:
	mutex_unlock(&trace_types_lock);

	return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

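/*
 * Illustrative sketch: a tracer plugin fills in a struct tracer and
 * registers it from an init call (the two hooks shown are optional;
 * mytrace_init/mytrace_reset are hypothetical):
 *
 *	static struct tracer mytrace __read_mostly = {
 *		.name	= "mytrace",
 *		.init	= mytrace_init,
 *		.reset	= mytrace_reset,
 *	};
 *
 *	return register_tracer(&mytrace);
 */
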
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->clear_trace)
			continue;
		tr->clear_trace = false;
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

static int *tgid_map;

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_taskinfo_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc_array(val,
					      sizeof(*s->map_cmdline_to_pid),
					      GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

379cfdac 1931static int trace_save_cmdline(struct task_struct *tsk)
bc0c38d1 1932{
a635cf04 1933 unsigned pid, idx;
bc0c38d1 1934
eaf260ac
JF
1935 /* treat recording of idle task as a success */
1936 if (!tsk->pid)
1937 return 1;
1938
1939 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
379cfdac 1940 return 0;
bc0c38d1
SR
1941
1942 /*
1943 * It's not the end of the world if we don't get
1944 * the lock, but we also don't want to spin
1945 * nor do we want to disable interrupts,
1946 * so if we miss here, then better luck next time.
1947 */
0199c4e6 1948 if (!arch_spin_trylock(&trace_cmdline_lock))
379cfdac 1949 return 0;
bc0c38d1 1950
939c7a4f 1951 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2c7eea4c 1952 if (idx == NO_CMDLINE_MAP) {
939c7a4f 1953 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
bc0c38d1 1954
a635cf04
CE
1955 /*
1956 * Check whether the cmdline buffer at idx has a pid
1957 * mapped. We are going to overwrite that entry so we
1958 * need to clear the map_pid_to_cmdline. Otherwise we
1959 * would read the new comm for the old pid.
1960 */
939c7a4f 1961 pid = savedcmd->map_cmdline_to_pid[idx];
a635cf04 1962 if (pid != NO_CMDLINE_MAP)
939c7a4f 1963 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
bc0c38d1 1964
939c7a4f
YY
1965 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1966 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
bc0c38d1 1967
939c7a4f 1968 savedcmd->cmdline_idx = idx;
bc0c38d1
SR
1969 }
1970
939c7a4f 1971 set_cmdline(idx, tsk->comm);
bc0c38d1 1972
0199c4e6 1973 arch_spin_unlock(&trace_cmdline_lock);
379cfdac
SRRH
1974
1975 return 1;
bc0c38d1
SR
1976}
1977
4c27e756 1978static void __trace_find_cmdline(int pid, char comm[])
bc0c38d1 1979{
bc0c38d1
SR
1980 unsigned map;
1981
4ca53085
SR
1982 if (!pid) {
1983 strcpy(comm, "<idle>");
1984 return;
1985 }
bc0c38d1 1986
74bf4076
SR
1987 if (WARN_ON_ONCE(pid < 0)) {
1988 strcpy(comm, "<XXX>");
1989 return;
1990 }
1991
4ca53085
SR
1992 if (pid > PID_MAX_DEFAULT) {
1993 strcpy(comm, "<...>");
1994 return;
1995 }
bc0c38d1 1996
939c7a4f 1997 map = savedcmd->map_pid_to_cmdline[pid];
50d88758 1998 if (map != NO_CMDLINE_MAP)
e09e2867 1999 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
50d88758
TG
2000 else
2001 strcpy(comm, "<...>");
4c27e756
SRRH
2002}
2003
2004void trace_find_cmdline(int pid, char comm[])
2005{
2006 preempt_disable();
2007 arch_spin_lock(&trace_cmdline_lock);
2008
2009 __trace_find_cmdline(pid, comm);
bc0c38d1 2010
0199c4e6 2011 arch_spin_unlock(&trace_cmdline_lock);
5b6045a9 2012 preempt_enable();
bc0c38d1
SR
2013}
2014
d914ba37
JF
2015int trace_find_tgid(int pid)
2016{
2017 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2018 return 0;
2019
2020 return tgid_map[pid];
2021}
2022
2023static int trace_save_tgid(struct task_struct *tsk)
2024{
bd45d34d
JF
2025 /* treat recording of idle task as a success */
2026 if (!tsk->pid)
2027 return 1;
2028
2029 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
d914ba37
JF
2030 return 0;
2031
2032 tgid_map[tsk->pid] = tsk->tgid;
2033 return 1;
2034}
2035
2036static bool tracing_record_taskinfo_skip(int flags)
2037{
2038 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2039 return true;
2040 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2041 return true;
2042 if (!__this_cpu_read(trace_taskinfo_save))
2043 return true;
2044 return false;
2045}
2046
2047/**
2048 * tracing_record_taskinfo - record the task info of a task
2049 *
2050 * @task: task to record
2051 * @flags: TRACE_RECORD_CMDLINE for recording comm
2052 * TRACE_RECORD_TGID for recording tgid
2053 */
2054void tracing_record_taskinfo(struct task_struct *task, int flags)
2055{
29b1a8ad
JF
2056 bool done;
2057
d914ba37
JF
2058 if (tracing_record_taskinfo_skip(flags))
2059 return;
29b1a8ad
JF
2060
2061 /*
2062 * Record as much task information as possible. If some fail, continue
2063 * to try to record the others.
2064 */
2065 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2066 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2067
2068 /* If recording any information failed, retry again soon. */
2069 if (!done)
d914ba37
JF
2070 return;
2071
2072 __this_cpu_write(trace_taskinfo_save, false);
2073}
2074
2075/**
2076 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2077 *
2078 * @prev: previous task during sched_switch
2079 * @next: next task during sched_switch
2080 * @flags: TRACE_RECORD_CMDLINE for recording comm
2081 * TRACE_RECORD_TGID for recording tgid
2082 */
2083void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2084 struct task_struct *next, int flags)
bc0c38d1 2085{
29b1a8ad
JF
2086 bool done;
2087
d914ba37
JF
2088 if (tracing_record_taskinfo_skip(flags))
2089 return;
2090
29b1a8ad
JF
2091 /*
2092 * Record as much task information as possible. If some fail, continue
2093 * to try to record the others.
2094 */
2095 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2096 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2097 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2098 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
bc0c38d1 2099
29b1a8ad
JF
2100 /* If recording any information failed, retry again soon. */
2101 if (!done)
7ffbd48d
SR
2102 return;
2103
d914ba37
JF
2104 __this_cpu_write(trace_taskinfo_save, false);
2105}
2106
2107/* Helpers to record a specific task information */
2108void tracing_record_cmdline(struct task_struct *task)
2109{
2110 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2111}
2112
2113void tracing_record_tgid(struct task_struct *task)
2114{
2115 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
bc0c38d1
SR
2116}
2117
af0009fc
SRV
2118/*
2119 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2120 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2121 * simplifies those functions and keeps them in sync.
2122 */
2123enum print_line_t trace_handle_return(struct trace_seq *s)
2124{
2125 return trace_seq_has_overflowed(s) ?
2126 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2127}
2128EXPORT_SYMBOL_GPL(trace_handle_return);
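/*
 * Typical use of trace_handle_return(), sketched with a hypothetical
 * output handler (not one registered in this file): write to the seq
 * buffer unconditionally, then let the helper report whether the
 * whole line fit.
 */
static enum print_line_t print_example_line(struct trace_iterator *iter)
{
	trace_seq_printf(&iter->seq, "cpu=%d ts=%llu\n", iter->cpu, iter->ts);
	return trace_handle_return(&iter->seq);
}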
2129
45dcd8b8 2130void
38697053
SR
2131tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2132 int pc)
bc0c38d1
SR
2133{
2134 struct task_struct *tsk = current;
bc0c38d1 2135
777e208d
SR
2136 entry->preempt_count = pc & 0xff;
2137 entry->pid = (tsk) ? tsk->pid : 0;
2138 entry->flags =
9244489a 2139#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2e2ca155 2140 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
9244489a
SR
2141#else
2142 TRACE_FLAG_IRQS_NOSUPPORT |
2143#endif
7e6867bf 2144 ((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
bc0c38d1 2145 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
c59f29cb 2146 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
e5137b50
PZ
2147 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2148 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
bc0c38d1 2149}
f413cdb8 2150EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
bc0c38d1 2151
e77405ad
SR
2152struct ring_buffer_event *
2153trace_buffer_lock_reserve(struct ring_buffer *buffer,
2154 int type,
2155 unsigned long len,
2156 unsigned long flags, int pc)
51a763dd 2157{
3e9a8aad 2158 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
0fc1b09f
SRRH
2159}
2160
2161DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2162DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2163static int trace_buffered_event_ref;
2164
2165/**
2166 * trace_buffered_event_enable - enable buffering events
2167 *
2168 * When events are being filtered, it is quicker to use a temporary
2169 * buffer to write the event data into if there's a likely chance
2170 * that it will not be committed. Discarding an event from the ring
2171 * buffer is not as fast as committing one, and is much slower than
2172 * copying into a temporary buffer and committing that.
2173 *
2174 * When an event is to be filtered, allocate per cpu buffers to
2175 * write the event data into, and if the event is filtered and discarded
2176 * it is simply dropped, otherwise, the entire data is to be committed
2177 * in one shot.
2178 */
2179void trace_buffered_event_enable(void)
2180{
2181 struct ring_buffer_event *event;
2182 struct page *page;
2183 int cpu;
51a763dd 2184
0fc1b09f
SRRH
2185 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2186
2187 if (trace_buffered_event_ref++)
2188 return;
2189
2190 for_each_tracing_cpu(cpu) {
2191 page = alloc_pages_node(cpu_to_node(cpu),
2192 GFP_KERNEL | __GFP_NORETRY, 0);
2193 if (!page)
2194 goto failed;
2195
2196 event = page_address(page);
2197 memset(event, 0, sizeof(*event));
2198
2199 per_cpu(trace_buffered_event, cpu) = event;
2200
2201 preempt_disable();
2202 if (cpu == smp_processor_id() &&
2203 this_cpu_read(trace_buffered_event) !=
2204 per_cpu(trace_buffered_event, cpu))
2205 WARN_ON_ONCE(1);
2206 preempt_enable();
51a763dd
ACM
2207 }
2208
0fc1b09f
SRRH
2209 return;
2210 failed:
2211 trace_buffered_event_disable();
2212}
2213
2214static void enable_trace_buffered_event(void *data)
2215{
2216 /* Probably not needed, but do it anyway */
2217 smp_rmb();
2218 this_cpu_dec(trace_buffered_event_cnt);
2219}
2220
2221static void disable_trace_buffered_event(void *data)
2222{
2223 this_cpu_inc(trace_buffered_event_cnt);
2224}
2225
2226/**
2227 * trace_buffered_event_disable - disable buffering events
2228 *
2229 * When a filter is removed, it is faster to not use the buffered
2230 * events, and to commit directly into the ring buffer. Free up
2231 * the temp buffers when there are no more users. This requires
2232 * special synchronization with current events.
2233 */
2234void trace_buffered_event_disable(void)
2235{
2236 int cpu;
2237
2238 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2239
2240 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2241 return;
2242
2243 if (--trace_buffered_event_ref)
2244 return;
2245
2246 preempt_disable();
2247 /* For each CPU, set the buffer as used. */
2248 smp_call_function_many(tracing_buffer_mask,
2249 disable_trace_buffered_event, NULL, 1);
2250 preempt_enable();
2251
2252 /* Wait for all current users to finish */
74401729 2253 synchronize_rcu();
0fc1b09f
SRRH
2254
2255 for_each_tracing_cpu(cpu) {
2256 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2257 per_cpu(trace_buffered_event, cpu) = NULL;
2258 }
2259 /*
2260 * Make sure trace_buffered_event is NULL before clearing
2261 * trace_buffered_event_cnt.
2262 */
2263 smp_wmb();
2264
2265 preempt_disable();
2266 /* Do the work on each cpu */
2267 smp_call_function_many(tracing_buffer_mask,
2268 enable_trace_buffered_event, NULL, 1);
2269 preempt_enable();
51a763dd 2270}
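/*
 * The enable/disable pair above is refcounted so nested users share
 * one set of per-CPU buffers: only the first enable allocates and
 * only the last disable frees. The idiom in isolation (setup and
 * teardown bodies are hypothetical stubs):
 */
static int toy_ref;

static void toy_setup(void)    { /* first-user allocation goes here */ }
static void toy_teardown(void) { /* last-user free goes here */ }

static void toy_enable(void)
{
	if (toy_ref++)		/* an earlier user already set things up */
		return;
	toy_setup();
}

static void toy_disable(void)
{
	if (--toy_ref)		/* later users are still active */
		return;
	toy_teardown();
}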
51a763dd 2271
2c4a33ab
SRRH
2272static struct ring_buffer *temp_buffer;
2273
ccb469a1
SR
2274struct ring_buffer_event *
2275trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
7f1d2f82 2276 struct trace_event_file *trace_file,
ccb469a1
SR
2277 int type, unsigned long len,
2278 unsigned long flags, int pc)
2279{
2c4a33ab 2280 struct ring_buffer_event *entry;
0fc1b09f 2281 int val;
2c4a33ab 2282
7f1d2f82 2283 *current_rb = trace_file->tr->trace_buffer.buffer;
0fc1b09f 2284
00b41452 2285 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
0fc1b09f
SRRH
2286 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2287 (entry = this_cpu_read(trace_buffered_event))) {
2288 /* Try to use the per cpu buffer first */
2289 val = this_cpu_inc_return(trace_buffered_event_cnt);
2290 if (val == 1) {
2291 trace_event_setup(entry, type, flags, pc);
2292 entry->array[0] = len;
2293 return entry;
2294 }
2295 this_cpu_dec(trace_buffered_event_cnt);
2296 }
2297
3e9a8aad
SRRH
2298 entry = __trace_buffer_lock_reserve(*current_rb,
2299 type, len, flags, pc);
2c4a33ab
SRRH
2300 /*
2301 * If tracing is off, but we have triggers enabled
2302 * we still need to look at the event data. Use the temp_buffer
2303 * to store the trace event for the trigger to use. It's recursion
2304 * safe and will not be recorded anywhere.
2305 */
5d6ad960 2306 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2c4a33ab 2307 *current_rb = temp_buffer;
3e9a8aad
SRRH
2308 entry = __trace_buffer_lock_reserve(*current_rb,
2309 type, len, flags, pc);
2c4a33ab
SRRH
2310 }
2311 return entry;
ccb469a1
SR
2312}
2313EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
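/*
 * The fast path above claims the per-CPU event buffer with
 * inc-then-check: the first claimant sees a count of 1 and owns the
 * buffer; anyone nesting on top (an interrupt arriving mid-claim)
 * sees more and backs out to the slow path. A userspace sketch, with
 * thread-local storage standing in for per-CPU data:
 */
#include <stdatomic.h>

static _Thread_local atomic_int toy_claim_cnt;
static _Thread_local char toy_claim_buf[4096];

static char *toy_try_claim(void)
{
	if (atomic_fetch_add(&toy_claim_cnt, 1) == 0)
		return toy_claim_buf;		/* first user: buffer is ours */
	atomic_fetch_sub(&toy_claim_cnt, 1);	/* nested: back out */
	return 0;				/* caller falls back to the slow path */
}

static void toy_release_claim(void)
{
	atomic_fetch_sub(&toy_claim_cnt, 1);
}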
2314
42391745
SRRH
2315static DEFINE_SPINLOCK(tracepoint_iter_lock);
2316static DEFINE_MUTEX(tracepoint_printk_mutex);
2317
2318static void output_printk(struct trace_event_buffer *fbuffer)
2319{
2320 struct trace_event_call *event_call;
2321 struct trace_event *event;
2322 unsigned long flags;
2323 struct trace_iterator *iter = tracepoint_print_iter;
2324
2325 /* We should never get here if iter is NULL */
2326 if (WARN_ON_ONCE(!iter))
2327 return;
2328
2329 event_call = fbuffer->trace_file->event_call;
2330 if (!event_call || !event_call->event.funcs ||
2331 !event_call->event.funcs->trace)
2332 return;
2333
2334 event = &fbuffer->trace_file->event_call->event;
2335
2336 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2337 trace_seq_init(&iter->seq);
2338 iter->ent = fbuffer->entry;
2339 event_call->event.funcs->trace(iter, 0, event);
2340 trace_seq_putc(&iter->seq, 0);
2341 printk("%s", iter->seq.buffer);
2342
2343 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2344}
2345
2346int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2347 void __user *buffer, size_t *lenp,
2348 loff_t *ppos)
2349{
2350 int save_tracepoint_printk;
2351 int ret;
2352
2353 mutex_lock(&tracepoint_printk_mutex);
2354 save_tracepoint_printk = tracepoint_printk;
2355
2356 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2357
2358 /*
2359 * This will force exiting early, as tracepoint_printk
2360 * is always zero when tracepoint_print_iter is not allocated.
2361 */
2362 if (!tracepoint_print_iter)
2363 tracepoint_printk = 0;
2364
2365 if (save_tracepoint_printk == tracepoint_printk)
2366 goto out;
2367
2368 if (tracepoint_printk)
2369 static_key_enable(&tracepoint_printk_key.key);
2370 else
2371 static_key_disable(&tracepoint_printk_key.key);
2372
2373 out:
2374 mutex_unlock(&tracepoint_printk_mutex);
2375
2376 return ret;
2377}
2378
2379void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2380{
2381 if (static_key_false(&tracepoint_printk_key.key))
2382 output_printk(fbuffer);
2383
2384 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2385 fbuffer->event, fbuffer->entry,
2386 fbuffer->flags, fbuffer->pc);
2387}
2388EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2389
2ee5b92a
SRV
2390/*
2391 * Skip 3:
2392 *
2393 * trace_buffer_unlock_commit_regs()
2394 * trace_event_buffer_commit()
2395 * trace_event_raw_event_xxx()
13cf912b 2396 */
2ee5b92a
SRV
2397# define STACK_SKIP 3
2398
b7f0c959
SRRH
2399void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2400 struct ring_buffer *buffer,
0d5c6e1c
SR
2401 struct ring_buffer_event *event,
2402 unsigned long flags, int pc,
2403 struct pt_regs *regs)
1fd8df2c 2404{
7ffbd48d 2405 __buffer_unlock_commit(buffer, event);
1fd8df2c 2406
be54f69c 2407 /*
2ee5b92a 2408 * If regs is not set, then skip the necessary functions.
be54f69c
SRRH
2409 * Note, we can still get here via blktrace, wakeup tracer
2410 * and mmiotrace, but that's ok if they lose a function or
2ee5b92a 2411 * two. They are not that meaningful.
be54f69c 2412 */
2ee5b92a 2413 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
1fd8df2c
MH
2414 ftrace_trace_userstack(buffer, flags, pc);
2415}
1fd8df2c 2416
52ffabe3
SRRH
2417/*
2418 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2419 */
2420void
2421trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2422 struct ring_buffer_event *event)
2423{
2424 __buffer_unlock_commit(buffer, event);
2425}
2426
478409dd
CZ
2427static void
2428trace_process_export(struct trace_export *export,
2429 struct ring_buffer_event *event)
2430{
2431 struct trace_entry *entry;
2432 unsigned int size = 0;
2433
2434 entry = ring_buffer_event_data(event);
2435 size = ring_buffer_event_length(event);
a773d419 2436 export->write(export, entry, size);
478409dd
CZ
2437}
2438
2439static DEFINE_MUTEX(ftrace_export_lock);
2440
2441static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2442
2443static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2444
2445static inline void ftrace_exports_enable(void)
2446{
2447 static_branch_enable(&ftrace_exports_enabled);
2448}
2449
2450static inline void ftrace_exports_disable(void)
2451{
2452 static_branch_disable(&ftrace_exports_enabled);
2453}
2454
1cce377d 2455static void ftrace_exports(struct ring_buffer_event *event)
478409dd
CZ
2456{
2457 struct trace_export *export;
2458
2459 preempt_disable_notrace();
2460
2461 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2462 while (export) {
2463 trace_process_export(export, event);
2464 export = rcu_dereference_raw_notrace(export->next);
2465 }
2466
2467 preempt_enable_notrace();
2468}
2469
2470static inline void
2471add_trace_export(struct trace_export **list, struct trace_export *export)
2472{
2473 rcu_assign_pointer(export->next, *list);
2474 /*
2475 * We are entering export into the list but another
2476 * CPU might be walking that list. We need to make sure
2477 * the export->next pointer is valid before another CPU sees
2478 * the export pointer inserted into the list.
2479 */
2480 rcu_assign_pointer(*list, export);
2481}
2482
2483static inline int
2484rm_trace_export(struct trace_export **list, struct trace_export *export)
2485{
2486 struct trace_export **p;
2487
2488 for (p = list; *p != NULL; p = &(*p)->next)
2489 if (*p == export)
2490 break;
2491
2492 if (*p != export)
2493 return -1;
2494
2495 rcu_assign_pointer(*p, (*p)->next);
2496
2497 return 0;
2498}
2499
2500static inline void
2501add_ftrace_export(struct trace_export **list, struct trace_export *export)
2502{
2503 if (*list == NULL)
2504 ftrace_exports_enable();
2505
2506 add_trace_export(list, export);
2507}
2508
2509static inline int
2510rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2511{
2512 int ret;
2513
2514 ret = rm_trace_export(list, export);
2515 if (*list == NULL)
2516 ftrace_exports_disable();
2517
2518 return ret;
2519}
2520
2521int register_ftrace_export(struct trace_export *export)
2522{
2523 if (WARN_ON_ONCE(!export->write))
2524 return -1;
2525
2526 mutex_lock(&ftrace_export_lock);
2527
2528 add_ftrace_export(&ftrace_exports_list, export);
2529
2530 mutex_unlock(&ftrace_export_lock);
2531
2532 return 0;
2533}
2534EXPORT_SYMBOL_GPL(register_ftrace_export);
2535
2536int unregister_ftrace_export(struct trace_export *export)
2537{
2538 int ret;
2539
2540 mutex_lock(&ftrace_export_lock);
2541
2542 ret = rm_ftrace_export(&ftrace_exports_list, export);
2543
2544 mutex_unlock(&ftrace_export_lock);
2545
2546 return ret;
2547}
2548EXPORT_SYMBOL_GPL(unregister_ftrace_export);
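/*
 * A minimal consumer of the export API above, sketched. The write
 * callback signature is taken from the ->write() call in
 * trace_process_export(); it runs with preemption disabled, so it
 * must not sleep.
 */
static void my_export_write(struct trace_export *export,
			    const void *entry, unsigned int size)
{
	/* copy the raw event to some non-sleeping sink (left hypothetical) */
}

static struct trace_export my_export = {
	.write = my_export_write,
};

/* register_ftrace_export(&my_export); ... unregister_ftrace_export(&my_export); */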
2549
e309b41d 2550void
7be42151 2551trace_function(struct trace_array *tr,
38697053
SR
2552 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2553 int pc)
bc0c38d1 2554{
2425bcb9 2555 struct trace_event_call *call = &event_function;
12883efb 2556 struct ring_buffer *buffer = tr->trace_buffer.buffer;
3928a8a2 2557 struct ring_buffer_event *event;
777e208d 2558 struct ftrace_entry *entry;
bc0c38d1 2559
3e9a8aad
SRRH
2560 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2561 flags, pc);
3928a8a2
SR
2562 if (!event)
2563 return;
2564 entry = ring_buffer_event_data(event);
777e208d
SR
2565 entry->ip = ip;
2566 entry->parent_ip = parent_ip;
e1112b4d 2567
478409dd
CZ
2568 if (!call_filter_check_discard(call, entry, buffer, event)) {
2569 if (static_branch_unlikely(&ftrace_exports_enabled))
2570 ftrace_exports(event);
7ffbd48d 2571 __buffer_unlock_commit(buffer, event);
478409dd 2572 }
bc0c38d1
SR
2573}
2574
c0a0d0d3 2575#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
2576
2577#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2578struct ftrace_stack {
2579 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2580};
2581
2582static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2583static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2584
e77405ad 2585static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 2586 unsigned long flags,
1fd8df2c 2587 int skip, int pc, struct pt_regs *regs)
86387f7e 2588{
2425bcb9 2589 struct trace_event_call *call = &event_kernel_stack;
3928a8a2 2590 struct ring_buffer_event *event;
777e208d 2591 struct stack_entry *entry;
86387f7e 2592 struct stack_trace trace;
4a9bd3f1
SR
2593 int use_stack;
2594 int size = FTRACE_STACK_ENTRIES;
2595
2596 trace.nr_entries = 0;
2597 trace.skip = skip;
2598
be54f69c 2599 /*
2ee5b92a 2600 * Add one, for this function and the call to save_stack_trace()
be54f69c
SRRH
2601 * If regs is set, then these functions will not be in the way.
2602 */
2ee5b92a 2603#ifndef CONFIG_UNWINDER_ORC
be54f69c 2604 if (!regs)
2ee5b92a
SRV
2605 trace.skip++;
2606#endif
be54f69c 2607
4a9bd3f1
SR
2608 /*
2609 * Since events can happen in NMIs there's no safe way to
2610 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2611 * or NMI comes in, it will just have to use the default
2612 * FTRACE_STACK_ENTRIES.
2613 */
2614 preempt_disable_notrace();
2615
82146529 2616 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
2617 /*
2618 * We don't need any atomic variables, just a barrier.
2619 * If an interrupt comes in, we don't care, because it would
2620 * have exited and put the counter back to what we want.
2621 * We just need a barrier to keep gcc from moving things
2622 * around.
2623 */
2624 barrier();
2625 if (use_stack == 1) {
bdffd893 2626 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
2627 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2628
2629 if (regs)
2630 save_stack_trace_regs(regs, &trace);
2631 else
2632 save_stack_trace(&trace);
2633
2634 if (trace.nr_entries > size)
2635 size = trace.nr_entries;
2636 } else
2637 /* From now on, use_stack is a boolean */
2638 use_stack = 0;
2639
2640 size *= sizeof(unsigned long);
86387f7e 2641
3e9a8aad
SRRH
2642 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2643 sizeof(*entry) + size, flags, pc);
3928a8a2 2644 if (!event)
4a9bd3f1
SR
2645 goto out;
2646 entry = ring_buffer_event_data(event);
86387f7e 2647
4a9bd3f1
SR
2648 memset(&entry->caller, 0, size);
2649
2650 if (use_stack)
2651 memcpy(&entry->caller, trace.entries,
2652 trace.nr_entries * sizeof(unsigned long));
2653 else {
2654 trace.max_entries = FTRACE_STACK_ENTRIES;
2655 trace.entries = entry->caller;
2656 if (regs)
2657 save_stack_trace_regs(regs, &trace);
2658 else
2659 save_stack_trace(&trace);
2660 }
2661
2662 entry->size = trace.nr_entries;
86387f7e 2663
f306cc82 2664 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2665 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
2666
2667 out:
2668 /* Again, don't let gcc optimize things here */
2669 barrier();
82146529 2670 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
2671 preempt_enable_notrace();
2672
f0a920d5
IM
2673}
2674
2d34f489
SRRH
2675static inline void ftrace_trace_stack(struct trace_array *tr,
2676 struct ring_buffer *buffer,
73dddbb5
SRRH
2677 unsigned long flags,
2678 int skip, int pc, struct pt_regs *regs)
53614991 2679{
2d34f489 2680 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
53614991
SR
2681 return;
2682
73dddbb5 2683 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
2684}
2685
c0a0d0d3
FW
2686void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2687 int pc)
38697053 2688{
a33d7d94
SRV
2689 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2690
2691 if (rcu_is_watching()) {
2692 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2693 return;
2694 }
2695
2696 /*
2697 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2698 * but if the above rcu_is_watching() failed, then the NMI
2699 * triggered someplace critical, and rcu_irq_enter() should
2700 * not be called from NMI.
2701 */
2702 if (unlikely(in_nmi()))
2703 return;
2704
a33d7d94
SRV
2705 rcu_irq_enter_irqson();
2706 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2707 rcu_irq_exit_irqson();
38697053
SR
2708}
2709
03889384
SR
2710/**
2711 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 2712 * @skip: Number of functions to skip (helper handlers)
03889384 2713 */
c142be8e 2714void trace_dump_stack(int skip)
03889384
SR
2715{
2716 unsigned long flags;
2717
2718 if (tracing_disabled || tracing_selftest_running)
e36c5458 2719 return;
03889384
SR
2720
2721 local_save_flags(flags);
2722
2ee5b92a
SRV
2723#ifndef CONFIG_UNWINDER_ORC
2724 /* Skip 1 to skip this function. */
2725 skip++;
2726#endif
c142be8e
SRRH
2727 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2728 flags, skip, preempt_count(), NULL);
03889384 2729}
da387e5c 2730EXPORT_SYMBOL_GPL(trace_dump_stack);
03889384 2731
91e86e56
SR
2732static DEFINE_PER_CPU(int, user_stack_count);
2733
e77405ad
SR
2734void
2735ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 2736{
2425bcb9 2737 struct trace_event_call *call = &event_user_stack;
8d7c6a96 2738 struct ring_buffer_event *event;
02b67518
TE
2739 struct userstack_entry *entry;
2740 struct stack_trace trace;
02b67518 2741
983f938a 2742 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
02b67518
TE
2743 return;
2744
b6345879
SR
2745 /*
2746 * NMIs cannot handle page faults, even with fixups.
2747 * Saving the user stack can (and often does) fault.
2748 */
2749 if (unlikely(in_nmi()))
2750 return;
02b67518 2751
91e86e56
SR
2752 /*
2753 * prevent recursion, since the user stack tracing may
2754 * trigger other kernel events.
2755 */
2756 preempt_disable();
2757 if (__this_cpu_read(user_stack_count))
2758 goto out;
2759
2760 __this_cpu_inc(user_stack_count);
2761
3e9a8aad
SRRH
2762 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2763 sizeof(*entry), flags, pc);
02b67518 2764 if (!event)
1dbd1951 2765 goto out_drop_count;
02b67518 2766 entry = ring_buffer_event_data(event);
02b67518 2767
48659d31 2768 entry->tgid = current->tgid;
02b67518
TE
2769 memset(&entry->caller, 0, sizeof(entry->caller));
2770
2771 trace.nr_entries = 0;
2772 trace.max_entries = FTRACE_STACK_ENTRIES;
2773 trace.skip = 0;
2774 trace.entries = entry->caller;
2775
2776 save_stack_trace_user(&trace);
f306cc82 2777 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2778 __buffer_unlock_commit(buffer, event);
91e86e56 2779
1dbd1951 2780 out_drop_count:
91e86e56 2781 __this_cpu_dec(user_stack_count);
91e86e56
SR
2782 out:
2783 preempt_enable();
02b67518
TE
2784}
2785
4fd27358
HE
2786#ifdef UNUSED
2787static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 2788{
7be42151 2789 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 2790}
4fd27358 2791#endif /* UNUSED */
02b67518 2792
c0a0d0d3
FW
2793#endif /* CONFIG_STACKTRACE */
2794
07d777fe
SR
2795/* created for use with alloc_percpu */
2796struct trace_buffer_struct {
e2ace001
AL
2797 int nesting;
2798 char buffer[4][TRACE_BUF_SIZE];
07d777fe
SR
2799};
2800
2801static struct trace_buffer_struct *trace_percpu_buffer;
07d777fe
SR
2802
2803/*
e2ace001
AL
2804 * This allows for lockless recording. If we're nested too deeply, then
2805 * this returns NULL.
07d777fe
SR
2806 */
2807static char *get_trace_buf(void)
2808{
e2ace001 2809 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
07d777fe 2810
e2ace001 2811 if (!buffer || buffer->nesting >= 4)
07d777fe
SR
2812 return NULL;
2813
3d9622c1
SRV
2814 buffer->nesting++;
2815
2816 /* Interrupts must see nesting incremented before we use the buffer */
2817 barrier();
2818 return &buffer->buffer[buffer->nesting][0];
e2ace001
AL
2819}
2820
2821static void put_trace_buf(void)
2822{
3d9622c1
SRV
2823 /* Don't let the decrement of nesting leak before this */
2824 barrier();
e2ace001 2825 this_cpu_dec(trace_percpu_buffer->nesting);
07d777fe
SR
2826}
2827
2828static int alloc_percpu_trace_buffer(void)
2829{
2830 struct trace_buffer_struct *buffers;
07d777fe
SR
2831
2832 buffers = alloc_percpu(struct trace_buffer_struct);
e2ace001
AL
2833 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2834 return -ENOMEM;
07d777fe
SR
2835
2836 trace_percpu_buffer = buffers;
07d777fe 2837 return 0;
07d777fe
SR
2838}
2839
81698831
SR
2840static int buffers_allocated;
2841
07d777fe
SR
2842void trace_printk_init_buffers(void)
2843{
07d777fe
SR
2844 if (buffers_allocated)
2845 return;
2846
2847 if (alloc_percpu_trace_buffer())
2848 return;
2849
2184db46
SR
2850 /* trace_printk() is for debug use only. Don't use it in production. */
2851
a395d6a7
JP
2852 pr_warn("\n");
2853 pr_warn("**********************************************************\n");
2854 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2855 pr_warn("** **\n");
2856 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2857 pr_warn("** **\n");
2858 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2859 pr_warn("** unsafe for production use. **\n");
2860 pr_warn("** **\n");
2861 pr_warn("** If you see this message and you are not debugging **\n");
2862 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2863 pr_warn("** **\n");
2864 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2865 pr_warn("**********************************************************\n");
07d777fe 2866
b382ede6
SR
2867 /* Expand the buffers to set size */
2868 tracing_update_buffers();
2869
07d777fe 2870 buffers_allocated = 1;
81698831
SR
2871
2872 /*
2873 * trace_printk_init_buffers() can be called by modules.
2874 * If that happens, then we need to start cmdline recording
2875 * directly here. If the global_trace.buffer is already
2876 * allocated here, then this was called by module code.
2877 */
12883efb 2878 if (global_trace.trace_buffer.buffer)
81698831
SR
2879 tracing_start_cmdline_record();
2880}
2881
2882void trace_printk_start_comm(void)
2883{
2884 /* Start tracing comms if trace printk is set */
2885 if (!buffers_allocated)
2886 return;
2887 tracing_start_cmdline_record();
2888}
2889
2890static void trace_printk_start_stop_comm(int enabled)
2891{
2892 if (!buffers_allocated)
2893 return;
2894
2895 if (enabled)
2896 tracing_start_cmdline_record();
2897 else
2898 tracing_stop_cmdline_record();
07d777fe
SR
2899}
2900
769b0441 2901/**
48ead020 2902 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
2903 * @ip: caller address; @fmt: format string; @args: arguments for @fmt
2904 */
40ce74f1 2905int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2906{
2425bcb9 2907 struct trace_event_call *call = &event_bprint;
769b0441 2908 struct ring_buffer_event *event;
e77405ad 2909 struct ring_buffer *buffer;
769b0441 2910 struct trace_array *tr = &global_trace;
48ead020 2911 struct bprint_entry *entry;
769b0441 2912 unsigned long flags;
07d777fe
SR
2913 char *tbuffer;
2914 int len = 0, size, pc;
769b0441
FW
2915
2916 if (unlikely(tracing_selftest_running || tracing_disabled))
2917 return 0;
2918
2919 /* Don't pollute graph traces with trace_vprintk internals */
2920 pause_graph_tracing();
2921
2922 pc = preempt_count();
5168ae50 2923 preempt_disable_notrace();
769b0441 2924
07d777fe
SR
2925 tbuffer = get_trace_buf();
2926 if (!tbuffer) {
2927 len = 0;
e2ace001 2928 goto out_nobuffer;
07d777fe 2929 }
769b0441 2930
07d777fe 2931 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2932
07d777fe
SR
2933 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2934 goto out;
769b0441 2935
07d777fe 2936 local_save_flags(flags);
769b0441 2937 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2938 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
2939 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2940 flags, pc);
769b0441 2941 if (!event)
07d777fe 2942 goto out;
769b0441
FW
2943 entry = ring_buffer_event_data(event);
2944 entry->ip = ip;
769b0441
FW
2945 entry->fmt = fmt;
2946
07d777fe 2947 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2948 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2949 __buffer_unlock_commit(buffer, event);
2d34f489 2950 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
d931369b 2951 }
769b0441 2952
769b0441 2953out:
e2ace001
AL
2954 put_trace_buf();
2955
2956out_nobuffer:
5168ae50 2957 preempt_enable_notrace();
769b0441
FW
2958 unpause_graph_tracing();
2959
2960 return len;
2961}
48ead020
FW
2962EXPORT_SYMBOL_GPL(trace_vbprintk);
2963
26b68dd2 2964__printf(3, 0)
12883efb
SRRH
2965static int
2966__trace_array_vprintk(struct ring_buffer *buffer,
2967 unsigned long ip, const char *fmt, va_list args)
48ead020 2968{
2425bcb9 2969 struct trace_event_call *call = &event_print;
48ead020 2970 struct ring_buffer_event *event;
07d777fe 2971 int len = 0, size, pc;
48ead020 2972 struct print_entry *entry;
07d777fe
SR
2973 unsigned long flags;
2974 char *tbuffer;
48ead020
FW
2975
2976 if (tracing_disabled || tracing_selftest_running)
2977 return 0;
2978
07d777fe
SR
2979 /* Don't pollute graph traces with trace_vprintk internals */
2980 pause_graph_tracing();
2981
48ead020
FW
2982 pc = preempt_count();
2983 preempt_disable_notrace();
48ead020 2984
07d777fe
SR
2985
2986 tbuffer = get_trace_buf();
2987 if (!tbuffer) {
2988 len = 0;
e2ace001 2989 goto out_nobuffer;
07d777fe 2990 }
48ead020 2991
3558a5ac 2992 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2993
07d777fe 2994 local_save_flags(flags);
48ead020 2995 size = sizeof(*entry) + len + 1;
3e9a8aad
SRRH
2996 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2997 flags, pc);
48ead020 2998 if (!event)
07d777fe 2999 goto out;
48ead020 3000 entry = ring_buffer_event_data(event);
c13d2f7c 3001 entry->ip = ip;
48ead020 3002
3558a5ac 3003 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 3004 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 3005 __buffer_unlock_commit(buffer, event);
2d34f489 3006 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
d931369b 3007 }
e2ace001
AL
3008
3009out:
3010 put_trace_buf();
3011
3012out_nobuffer:
48ead020 3013 preempt_enable_notrace();
07d777fe 3014 unpause_graph_tracing();
48ead020
FW
3015
3016 return len;
3017}
659372d3 3018
26b68dd2 3019__printf(3, 0)
12883efb
SRRH
3020int trace_array_vprintk(struct trace_array *tr,
3021 unsigned long ip, const char *fmt, va_list args)
3022{
3023 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3024}
3025
26b68dd2 3026__printf(3, 0)
12883efb
SRRH
3027int trace_array_printk(struct trace_array *tr,
3028 unsigned long ip, const char *fmt, ...)
3029{
3030 int ret;
3031 va_list ap;
3032
983f938a 3033 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
3034 return 0;
3035
3036 va_start(ap, fmt);
3037 ret = trace_array_vprintk(tr, ip, fmt, ap);
3038 va_end(ap);
3039 return ret;
3040}
3041
26b68dd2 3042__printf(3, 4)
12883efb
SRRH
3043int trace_array_printk_buf(struct ring_buffer *buffer,
3044 unsigned long ip, const char *fmt, ...)
3045{
3046 int ret;
3047 va_list ap;
3048
983f938a 3049 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
3050 return 0;
3051
3052 va_start(ap, fmt);
3053 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3054 va_end(ap);
3055 return ret;
3056}
3057
26b68dd2 3058__printf(2, 0)
659372d3
SR
3059int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3060{
a813a159 3061 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 3062}
769b0441
FW
3063EXPORT_SYMBOL_GPL(trace_vprintk);
3064
e2ac8ef5 3065static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 3066{
6d158a81
SR
3067 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3068
5a90f577 3069 iter->idx++;
6d158a81
SR
3070 if (buf_iter)
3071 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
3072}
3073
e309b41d 3074static struct trace_entry *
bc21b478
SR
3075peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3076 unsigned long *lost_events)
dd0e545f 3077{
3928a8a2 3078 struct ring_buffer_event *event;
6d158a81 3079 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 3080
d769041f
SR
3081 if (buf_iter)
3082 event = ring_buffer_iter_peek(buf_iter, ts);
3083 else
12883efb 3084 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 3085 lost_events);
d769041f 3086
4a9bd3f1
SR
3087 if (event) {
3088 iter->ent_size = ring_buffer_event_length(event);
3089 return ring_buffer_event_data(event);
3090 }
3091 iter->ent_size = 0;
3092 return NULL;
dd0e545f 3093}
d769041f 3094
dd0e545f 3095static struct trace_entry *
bc21b478
SR
3096__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3097 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 3098{
12883efb 3099 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 3100 struct trace_entry *ent, *next = NULL;
aa27497c 3101 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 3102 int cpu_file = iter->cpu_file;
3928a8a2 3103 u64 next_ts = 0, ts;
bc0c38d1 3104 int next_cpu = -1;
12b5da34 3105 int next_size = 0;
bc0c38d1
SR
3106 int cpu;
3107
b04cc6b1
FW
3108 /*
3109 * If we are in a per_cpu trace file, don't bother iterating over
3110 * all CPUs; peek at that CPU directly.
3111 */
ae3b5093 3112 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
3113 if (ring_buffer_empty_cpu(buffer, cpu_file))
3114 return NULL;
bc21b478 3115 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
3116 if (ent_cpu)
3117 *ent_cpu = cpu_file;
3118
3119 return ent;
3120 }
3121
ab46428c 3122 for_each_tracing_cpu(cpu) {
dd0e545f 3123
3928a8a2
SR
3124 if (ring_buffer_empty_cpu(buffer, cpu))
3125 continue;
dd0e545f 3126
bc21b478 3127 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 3128
cdd31cd2
IM
3129 /*
3130 * Pick the entry with the smallest timestamp:
3131 */
3928a8a2 3132 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
3133 next = ent;
3134 next_cpu = cpu;
3928a8a2 3135 next_ts = ts;
bc21b478 3136 next_lost = lost_events;
12b5da34 3137 next_size = iter->ent_size;
bc0c38d1
SR
3138 }
3139 }
3140
12b5da34
SR
3141 iter->ent_size = next_size;
3142
bc0c38d1
SR
3143 if (ent_cpu)
3144 *ent_cpu = next_cpu;
3145
3928a8a2
SR
3146 if (ent_ts)
3147 *ent_ts = next_ts;
3148
bc21b478
SR
3149 if (missing_events)
3150 *missing_events = next_lost;
3151
bc0c38d1
SR
3152 return next;
3153}
3154
dd0e545f 3155/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
3156struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3157 int *ent_cpu, u64 *ent_ts)
bc0c38d1 3158{
bc21b478 3159 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
3160}
3161
3162/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 3163void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 3164{
bc21b478
SR
3165 iter->ent = __find_next_entry(iter, &iter->cpu,
3166 &iter->lost_events, &iter->ts);
dd0e545f 3167
3928a8a2 3168 if (iter->ent)
e2ac8ef5 3169 trace_iterator_increment(iter);
dd0e545f 3170
3928a8a2 3171 return iter->ent ? iter : NULL;
b3806b43 3172}
bc0c38d1 3173
e309b41d 3174static void trace_consume(struct trace_iterator *iter)
b3806b43 3175{
12883efb 3176 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 3177 &iter->lost_events);
bc0c38d1
SR
3178}
3179
e309b41d 3180static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
3181{
3182 struct trace_iterator *iter = m->private;
bc0c38d1 3183 int i = (int)*pos;
4e3c3333 3184 void *ent;
bc0c38d1 3185
a63ce5b3
SR
3186 WARN_ON_ONCE(iter->leftover);
3187
bc0c38d1
SR
3188 (*pos)++;
3189
3190 /* can't go backwards */
3191 if (iter->idx > i)
3192 return NULL;
3193
3194 if (iter->idx < 0)
955b61e5 3195 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3196 else
3197 ent = iter;
3198
3199 while (ent && iter->idx < i)
955b61e5 3200 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3201
3202 iter->pos = *pos;
3203
bc0c38d1
SR
3204 return ent;
3205}
3206
955b61e5 3207void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 3208{
2f26ebd5
SR
3209 struct ring_buffer_event *event;
3210 struct ring_buffer_iter *buf_iter;
3211 unsigned long entries = 0;
3212 u64 ts;
3213
12883efb 3214 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 3215
6d158a81
SR
3216 buf_iter = trace_buffer_iter(iter, cpu);
3217 if (!buf_iter)
2f26ebd5
SR
3218 return;
3219
2f26ebd5
SR
3220 ring_buffer_iter_reset(buf_iter);
3221
3222 /*
3223 * We could have the case with the max latency tracers
3224 * that a reset never took place on a cpu. This is evidenced
3225 * by the timestamp being before the start of the buffer.
3226 */
3227 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 3228 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
3229 break;
3230 entries++;
3231 ring_buffer_read(buf_iter, NULL);
3232 }
3233
12883efb 3234 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
3235}
3236
d7350c3f 3237/*
d7350c3f
FW
3238 * The current tracer is copied to avoid taking a global lock
3239 * all around.
3240 */
bc0c38d1
SR
3241static void *s_start(struct seq_file *m, loff_t *pos)
3242{
3243 struct trace_iterator *iter = m->private;
2b6080f2 3244 struct trace_array *tr = iter->tr;
b04cc6b1 3245 int cpu_file = iter->cpu_file;
bc0c38d1
SR
3246 void *p = NULL;
3247 loff_t l = 0;
3928a8a2 3248 int cpu;
bc0c38d1 3249
2fd196ec
HT
3250 /*
3251 * copy the tracer to avoid using a global lock all around.
3252 * iter->trace is a copy of current_trace, the pointer to the
3253 * name may be used instead of a strcmp(), as iter->trace->name
3254 * will point to the same string as current_trace->name.
3255 */
bc0c38d1 3256 mutex_lock(&trace_types_lock);
2b6080f2
SR
3257 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3258 *iter->trace = *tr->current_trace;
d7350c3f 3259 mutex_unlock(&trace_types_lock);
bc0c38d1 3260
12883efb 3261#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3262 if (iter->snapshot && iter->trace->use_max_tr)
3263 return ERR_PTR(-EBUSY);
12883efb 3264#endif
debdd57f
HT
3265
3266 if (!iter->snapshot)
d914ba37 3267 atomic_inc(&trace_record_taskinfo_disabled);
bc0c38d1 3268
bc0c38d1
SR
3269 if (*pos != iter->pos) {
3270 iter->ent = NULL;
3271 iter->cpu = 0;
3272 iter->idx = -1;
3273
ae3b5093 3274 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3275 for_each_tracing_cpu(cpu)
2f26ebd5 3276 tracing_iter_reset(iter, cpu);
b04cc6b1 3277 } else
2f26ebd5 3278 tracing_iter_reset(iter, cpu_file);
bc0c38d1 3279
ac91d854 3280 iter->leftover = 0;
bc0c38d1
SR
3281 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3282 ;
3283
3284 } else {
a63ce5b3
SR
3285 /*
3286 * If we overflowed the seq_file before, then we want
3287 * to just reuse the trace_seq buffer again.
3288 */
3289 if (iter->leftover)
3290 p = iter;
3291 else {
3292 l = *pos - 1;
3293 p = s_next(m, p, &l);
3294 }
bc0c38d1
SR
3295 }
3296
4f535968 3297 trace_event_read_lock();
7e53bd42 3298 trace_access_lock(cpu_file);
bc0c38d1
SR
3299 return p;
3300}
3301
3302static void s_stop(struct seq_file *m, void *p)
3303{
7e53bd42
LJ
3304 struct trace_iterator *iter = m->private;
3305
12883efb 3306#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3307 if (iter->snapshot && iter->trace->use_max_tr)
3308 return;
12883efb 3309#endif
debdd57f
HT
3310
3311 if (!iter->snapshot)
d914ba37 3312 atomic_dec(&trace_record_taskinfo_disabled);
12883efb 3313
7e53bd42 3314 trace_access_unlock(iter->cpu_file);
4f535968 3315 trace_event_read_unlock();
bc0c38d1
SR
3316}
3317
39eaf7ef 3318static void
12883efb
SRRH
3319get_total_entries(struct trace_buffer *buf,
3320 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
3321{
3322 unsigned long count;
3323 int cpu;
3324
3325 *total = 0;
3326 *entries = 0;
3327
3328 for_each_tracing_cpu(cpu) {
12883efb 3329 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
3330 /*
3331 * If this buffer has skipped entries, then we hold all
3332 * entries for the trace and we need to ignore the
3333 * ones before the time stamp.
3334 */
12883efb
SRRH
3335 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3336 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
3337 /* total is the same as the entries */
3338 *total += count;
3339 } else
3340 *total += count +
12883efb 3341 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
3342 *entries += count;
3343 }
3344}
3345
e309b41d 3346static void print_lat_help_header(struct seq_file *m)
bc0c38d1 3347{
d79ac28f
RV
3348 seq_puts(m, "# _------=> CPU# \n"
3349 "# / _-----=> irqs-off \n"
3350 "# | / _----=> need-resched \n"
3351 "# || / _---=> hardirq/softirq \n"
3352 "# ||| / _--=> preempt-depth \n"
3353 "# |||| / delay \n"
3354 "# cmd pid ||||| time | caller \n"
3355 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
3356}
3357
12883efb 3358static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 3359{
39eaf7ef
SR
3360 unsigned long total;
3361 unsigned long entries;
3362
12883efb 3363 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
3364 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3365 entries, total, num_online_cpus());
3366 seq_puts(m, "#\n");
3367}
3368
441dae8f
JF
3369static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3370 unsigned int flags)
39eaf7ef 3371{
441dae8f
JF
3372 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3373
12883efb 3374 print_event_info(buf, m);
441dae8f 3375
f8494fa3
JFG
3376 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3377 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
bc0c38d1
SR
3378}
3379
441dae8f
JF
3380static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3381 unsigned int flags)
77271ce4 3382{
441dae8f 3383 bool tgid = flags & TRACE_ITER_RECORD_TGID;
b11fb737
SRV
3384 const char tgid_space[] = " ";
3385 const char space[] = " ";
3386
3387 seq_printf(m, "# %s _-----=> irqs-off\n",
3388 tgid ? tgid_space : space);
3389 seq_printf(m, "# %s / _----=> need-resched\n",
3390 tgid ? tgid_space : space);
3391 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3392 tgid ? tgid_space : space);
3393 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3394 tgid ? tgid_space : space);
3395 seq_printf(m, "# %s||| / delay\n",
3396 tgid ? tgid_space : space);
f8494fa3 3397 seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
b11fb737 3398 tgid ? " TGID " : space);
f8494fa3 3399 seq_printf(m, "# | | %s | |||| | |\n",
b11fb737 3400 tgid ? " | " : space);
77271ce4 3401}
bc0c38d1 3402
62b915f1 3403void
bc0c38d1
SR
3404print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3405{
983f938a 3406 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
3407 struct trace_buffer *buf = iter->trace_buffer;
3408 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 3409 struct tracer *type = iter->trace;
39eaf7ef
SR
3410 unsigned long entries;
3411 unsigned long total;
bc0c38d1
SR
3412 const char *name = "preemption";
3413
d840f718 3414 name = type->name;
bc0c38d1 3415
12883efb 3416 get_total_entries(buf, &total, &entries);
bc0c38d1 3417
888b55dc 3418 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 3419 name, UTS_RELEASE);
888b55dc 3420 seq_puts(m, "# -----------------------------------"
bc0c38d1 3421 "---------------------------------\n");
888b55dc 3422 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 3423 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 3424 nsecs_to_usecs(data->saved_latency),
bc0c38d1 3425 entries,
4c11d7ae 3426 total,
12883efb 3427 buf->cpu,
bc0c38d1
SR
3428#if defined(CONFIG_PREEMPT_NONE)
3429 "server",
3430#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3431 "desktop",
b5c21b45 3432#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
3433 "preempt",
3434#else
3435 "unknown",
3436#endif
3437 /* These are reserved for later use */
3438 0, 0, 0, 0);
3439#ifdef CONFIG_SMP
3440 seq_printf(m, " #P:%d)\n", num_online_cpus());
3441#else
3442 seq_puts(m, ")\n");
3443#endif
888b55dc
KM
3444 seq_puts(m, "# -----------------\n");
3445 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 3446 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
3447 data->comm, data->pid,
3448 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 3449 data->policy, data->rt_priority);
888b55dc 3450 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
3451
3452 if (data->critical_start) {
888b55dc 3453 seq_puts(m, "# => started at: ");
214023c3
SR
3454 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3455 trace_print_seq(m, &iter->seq);
888b55dc 3456 seq_puts(m, "\n# => ended at: ");
214023c3
SR
3457 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3458 trace_print_seq(m, &iter->seq);
8248ac05 3459 seq_puts(m, "\n#\n");
bc0c38d1
SR
3460 }
3461
888b55dc 3462 seq_puts(m, "#\n");
bc0c38d1
SR
3463}
3464
a309720c
SR
3465static void test_cpu_buff_start(struct trace_iterator *iter)
3466{
3467 struct trace_seq *s = &iter->seq;
983f938a 3468 struct trace_array *tr = iter->tr;
a309720c 3469
983f938a 3470 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
12ef7d44
SR
3471 return;
3472
3473 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3474 return;
3475
4dbbe2d8
MK
3476 if (cpumask_available(iter->started) &&
3477 cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
3478 return;
3479
12883efb 3480 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
3481 return;
3482
4dbbe2d8 3483 if (cpumask_available(iter->started))
919cd979 3484 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
3485
3486 /* Don't print started cpu buffer for the first entry of the trace */
3487 if (iter->idx > 1)
3488 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3489 iter->cpu);
a309720c
SR
3490}
3491
2c4f035f 3492static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 3493{
983f938a 3494 struct trace_array *tr = iter->tr;
214023c3 3495 struct trace_seq *s = &iter->seq;
983f938a 3496 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 3497 struct trace_entry *entry;
f633cef0 3498 struct trace_event *event;
bc0c38d1 3499
4e3c3333 3500 entry = iter->ent;
dd0e545f 3501
a309720c
SR
3502 test_cpu_buff_start(iter);
3503
c4a8e8be 3504 event = ftrace_find_event(entry->type);
bc0c38d1 3505
983f938a 3506 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3507 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3508 trace_print_lat_context(iter);
3509 else
3510 trace_print_context(iter);
c4a8e8be 3511 }
bc0c38d1 3512
19a7fe20
SRRH
3513 if (trace_seq_has_overflowed(s))
3514 return TRACE_TYPE_PARTIAL_LINE;
3515
268ccda0 3516 if (event)
a9a57763 3517 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 3518
19a7fe20 3519 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 3520
19a7fe20 3521 return trace_handle_return(s);
bc0c38d1
SR
3522}
3523
2c4f035f 3524static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3 3525{
983f938a 3526 struct trace_array *tr = iter->tr;
f9896bf3
IM
3527 struct trace_seq *s = &iter->seq;
3528 struct trace_entry *entry;
f633cef0 3529 struct trace_event *event;
f9896bf3
IM
3530
3531 entry = iter->ent;
dd0e545f 3532
983f938a 3533 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
19a7fe20
SRRH
3534 trace_seq_printf(s, "%d %d %llu ",
3535 entry->pid, iter->cpu, iter->ts);
3536
3537 if (trace_seq_has_overflowed(s))
3538 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 3539
f633cef0 3540 event = ftrace_find_event(entry->type);
268ccda0 3541 if (event)
a9a57763 3542 return event->funcs->raw(iter, 0, event);
d9793bd8 3543
19a7fe20 3544 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 3545
19a7fe20 3546 return trace_handle_return(s);
f9896bf3
IM
3547}
3548
2c4f035f 3549static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec 3550{
983f938a 3551 struct trace_array *tr = iter->tr;
5e3ca0ec
IM
3552 struct trace_seq *s = &iter->seq;
3553 unsigned char newline = '\n';
3554 struct trace_entry *entry;
f633cef0 3555 struct trace_event *event;
5e3ca0ec
IM
3556
3557 entry = iter->ent;
dd0e545f 3558
983f938a 3559 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3560 SEQ_PUT_HEX_FIELD(s, entry->pid);
3561 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3562 SEQ_PUT_HEX_FIELD(s, iter->ts);
3563 if (trace_seq_has_overflowed(s))
3564 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3565 }
5e3ca0ec 3566
f633cef0 3567 event = ftrace_find_event(entry->type);
268ccda0 3568 if (event) {
a9a57763 3569 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
3570 if (ret != TRACE_TYPE_HANDLED)
3571 return ret;
3572 }
7104f300 3573
19a7fe20 3574 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 3575
19a7fe20 3576 return trace_handle_return(s);
5e3ca0ec
IM
3577}
3578
2c4f035f 3579static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa 3580{
983f938a 3581 struct trace_array *tr = iter->tr;
cb0f12aa
IM
3582 struct trace_seq *s = &iter->seq;
3583 struct trace_entry *entry;
f633cef0 3584 struct trace_event *event;
cb0f12aa
IM
3585
3586 entry = iter->ent;
dd0e545f 3587
983f938a 3588 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3589 SEQ_PUT_FIELD(s, entry->pid);
3590 SEQ_PUT_FIELD(s, iter->cpu);
3591 SEQ_PUT_FIELD(s, iter->ts);
3592 if (trace_seq_has_overflowed(s))
3593 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3594 }
cb0f12aa 3595
f633cef0 3596 event = ftrace_find_event(entry->type);
a9a57763
SR
3597 return event ? event->funcs->binary(iter, 0, event) :
3598 TRACE_TYPE_HANDLED;
cb0f12aa
IM
3599}
3600
62b915f1 3601int trace_empty(struct trace_iterator *iter)
bc0c38d1 3602{
6d158a81 3603 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
3604 int cpu;
3605
9aba60fe 3606 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 3607 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 3608 cpu = iter->cpu_file;
6d158a81
SR
3609 buf_iter = trace_buffer_iter(iter, cpu);
3610 if (buf_iter) {
3611 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
3612 return 0;
3613 } else {
12883efb 3614 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
3615 return 0;
3616 }
3617 return 1;
3618 }
3619
ab46428c 3620 for_each_tracing_cpu(cpu) {
6d158a81
SR
3621 buf_iter = trace_buffer_iter(iter, cpu);
3622 if (buf_iter) {
3623 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
3624 return 0;
3625 } else {
12883efb 3626 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
3627 return 0;
3628 }
bc0c38d1 3629 }
d769041f 3630
797d3712 3631 return 1;
bc0c38d1
SR
3632}
3633
4f535968 3634/* Called with trace_event_read_lock() held. */
955b61e5 3635enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 3636{
983f938a
SRRH
3637 struct trace_array *tr = iter->tr;
3638 unsigned long trace_flags = tr->trace_flags;
2c4f035f
FW
3639 enum print_line_t ret;
3640
19a7fe20
SRRH
3641 if (iter->lost_events) {
3642 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3643 iter->cpu, iter->lost_events);
3644 if (trace_seq_has_overflowed(&iter->seq))
3645 return TRACE_TYPE_PARTIAL_LINE;
3646 }
bc21b478 3647
2c4f035f
FW
3648 if (iter->trace && iter->trace->print_line) {
3649 ret = iter->trace->print_line(iter);
3650 if (ret != TRACE_TYPE_UNHANDLED)
3651 return ret;
3652 }
72829bc3 3653
09ae7234
SRRH
3654 if (iter->ent->type == TRACE_BPUTS &&
3655 trace_flags & TRACE_ITER_PRINTK &&
3656 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3657 return trace_print_bputs_msg_only(iter);
3658
48ead020
FW
3659 if (iter->ent->type == TRACE_BPRINT &&
3660 trace_flags & TRACE_ITER_PRINTK &&
3661 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3662 return trace_print_bprintk_msg_only(iter);
48ead020 3663
66896a85
FW
3664 if (iter->ent->type == TRACE_PRINT &&
3665 trace_flags & TRACE_ITER_PRINTK &&
3666 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3667 return trace_print_printk_msg_only(iter);
66896a85 3668
cb0f12aa
IM
3669 if (trace_flags & TRACE_ITER_BIN)
3670 return print_bin_fmt(iter);
3671
5e3ca0ec
IM
3672 if (trace_flags & TRACE_ITER_HEX)
3673 return print_hex_fmt(iter);
3674
f9896bf3
IM
3675 if (trace_flags & TRACE_ITER_RAW)
3676 return print_raw_fmt(iter);
3677
f9896bf3
IM
3678 return print_trace_fmt(iter);
3679}
3680
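/*
 * Illustrative sketch (not part of this file): print_trace_line()
 * above dispatches in a fixed order -- the tracer's own print_line()
 * first, then the printk-msgonly fast paths, then bin > hex > raw
 * before falling back to print_trace_fmt(). A standalone userspace
 * program exercising the hex path; the tracefs mount point is an
 * assumption.
 */
#include <stdio.h>

int main(void)
{
	const char *dir = "/sys/kernel/tracing";	/* assumed mount point */
	char path[256], line[512];
	FILE *f;

	snprintf(path, sizeof(path), "%s/trace_options", dir);
	f = fopen(path, "w");
	if (!f) {
		perror("trace_options");
		return 1;
	}
	fputs("hex\n", f);		/* selects print_hex_fmt() above */
	fclose(f);

	snprintf(path, sizeof(path), "%s/trace", dir);
	f = fopen(path, "r");
	if (!f) {
		perror("trace");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (line[0] == '#')	/* skip header comments */
			continue;
		fputs(line, stdout);	/* first entry, hex-formatted */
		break;
	}
	fclose(f);
	return 0;
}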
7e9a49ef
JO
3681void trace_latency_header(struct seq_file *m)
3682{
3683 struct trace_iterator *iter = m->private;
983f938a 3684 struct trace_array *tr = iter->tr;
7e9a49ef
JO
3685
3686 /* print nothing if the buffers are empty */
3687 if (trace_empty(iter))
3688 return;
3689
3690 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3691 print_trace_header(m, iter);
3692
983f938a 3693 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
7e9a49ef
JO
3694 print_lat_help_header(m);
3695}
3696
62b915f1
JO
3697void trace_default_header(struct seq_file *m)
3698{
3699 struct trace_iterator *iter = m->private;
983f938a
SRRH
3700 struct trace_array *tr = iter->tr;
3701 unsigned long trace_flags = tr->trace_flags;
62b915f1 3702
f56e7f8e
JO
3703 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3704 return;
3705
62b915f1
JO
3706 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3707 /* print nothing if the buffers are empty */
3708 if (trace_empty(iter))
3709 return;
3710 print_trace_header(m, iter);
3711 if (!(trace_flags & TRACE_ITER_VERBOSE))
3712 print_lat_help_header(m);
3713 } else {
77271ce4
SR
3714 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3715 if (trace_flags & TRACE_ITER_IRQ_INFO)
441dae8f
JF
3716 print_func_help_header_irq(iter->trace_buffer,
3717 m, trace_flags);
77271ce4 3718 else
441dae8f
JF
3719 print_func_help_header(iter->trace_buffer, m,
3720 trace_flags);
77271ce4 3721 }
62b915f1
JO
3722 }
3723}
3724
e0a413f6
SR
3725static void test_ftrace_alive(struct seq_file *m)
3726{
3727 if (!ftrace_is_dead())
3728 return;
d79ac28f
RV
3729 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3730 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
3731}
3732
d8741e2e 3733#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 3734static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 3735{
d79ac28f
RV
3736 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3737 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3738 "# Takes a snapshot of the main buffer.\n"
3739 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3740 "# (Doesn't have to be '2' works with any number that\n"
3741 "# is not a '0' or '1')\n");
d8741e2e 3742}
f1affcaa
SRRH
3743
3744static void show_snapshot_percpu_help(struct seq_file *m)
3745{
fa6f0cc7 3746 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 3747#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
3748 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3749 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 3750#else
d79ac28f
RV
3751 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3752 "# Must use main snapshot file to allocate.\n");
f1affcaa 3753#endif
d79ac28f
RV
3754 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3755 "# (Doesn't have to be '2' works with any number that\n"
3756 "# is not a '0' or '1')\n");
f1affcaa
SRRH
3757}
3758
d8741e2e
SRRH
3759static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3760{
45ad21ca 3761 if (iter->tr->allocated_snapshot)
fa6f0cc7 3762 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 3763 else
fa6f0cc7 3764 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 3765
fa6f0cc7 3766 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
3767 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3768 show_snapshot_main_help(m);
3769 else
3770 show_snapshot_percpu_help(m);
d8741e2e
SRRH
3771}
3772#else
3773/* Should never be called */
3774static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3775#endif
3776
bc0c38d1
SR
3777static int s_show(struct seq_file *m, void *v)
3778{
3779 struct trace_iterator *iter = v;
a63ce5b3 3780 int ret;
bc0c38d1
SR
3781
3782 if (iter->ent == NULL) {
3783 if (iter->tr) {
3784 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3785 seq_puts(m, "#\n");
e0a413f6 3786 test_ftrace_alive(m);
bc0c38d1 3787 }
d8741e2e
SRRH
3788 if (iter->snapshot && trace_empty(iter))
3789 print_snapshot_help(m, iter);
3790 else if (iter->trace && iter->trace->print_header)
8bba1bf5 3791 iter->trace->print_header(m);
62b915f1
JO
3792 else
3793 trace_default_header(m);
3794
a63ce5b3
SR
3795 } else if (iter->leftover) {
3796 /*
3797 * If we filled the seq_file buffer earlier, we
3798 * want to just show it now.
3799 */
3800 ret = trace_print_seq(m, &iter->seq);
3801
3802 /* ret should this time be zero, but you never know */
3803 iter->leftover = ret;
3804
bc0c38d1 3805 } else {
f9896bf3 3806 print_trace_line(iter);
a63ce5b3
SR
3807 ret = trace_print_seq(m, &iter->seq);
3808 /*
3809 * If we overflow the seq_file buffer, then it will
3810 * ask us for this data again at start up.
3811 * Use that instead.
3812 * ret is 0 if seq_file write succeeded.
3813 * -1 otherwise.
3814 */
3815 iter->leftover = ret;
bc0c38d1
SR
3816 }
3817
3818 return 0;
3819}
3820
649e9c70
ON
3821/*
3822 * Should be used after trace_array_get(), trace_types_lock
3823 * ensures that i_cdev was already initialized.
3824 */
3825static inline int tracing_get_cpu(struct inode *inode)
3826{
3827 if (inode->i_cdev) /* See trace_create_cpu_file() */
3828 return (long)inode->i_cdev - 1;
3829 return RING_BUFFER_ALL_CPUS;
3830}
3831
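/*
 * Sketch of the encoding tracing_get_cpu() decodes (the helper name
 * below is hypothetical; trace_create_cpu_file() is the real writer).
 * Storing cpu + 1 in i_cdev lets a NULL pointer -- as on the
 * top-level files -- naturally mean "all CPUs" without storing a
 * separate sentinel.
 */
static inline void example_stash_cpu(struct inode *inode, long cpu)
{
	inode->i_cdev = (void *)(cpu + 1);	/* 0 stays "no cpu stored" */
}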
88e9d34c 3832static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3833 .start = s_start,
3834 .next = s_next,
3835 .stop = s_stop,
3836 .show = s_show,
bc0c38d1
SR
3837};
3838
e309b41d 3839static struct trace_iterator *
6484c71c 3840__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3841{
6484c71c 3842 struct trace_array *tr = inode->i_private;
bc0c38d1 3843 struct trace_iterator *iter;
50e18b94 3844 int cpu;
bc0c38d1 3845
85a2f9b4
SR
3846 if (tracing_disabled)
3847 return ERR_PTR(-ENODEV);
60a11774 3848
50e18b94 3849 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3850 if (!iter)
3851 return ERR_PTR(-ENOMEM);
bc0c38d1 3852
72917235 3853 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 3854 GFP_KERNEL);
93574fcc
DC
3855 if (!iter->buffer_iter)
3856 goto release;
3857
d7350c3f
FW
3858 /*
3859 * We make a copy of the current tracer to avoid concurrent
 3860 * changes to it while we are reading.
3861 */
bc0c38d1 3862 mutex_lock(&trace_types_lock);
d7350c3f 3863 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3864 if (!iter->trace)
d7350c3f 3865 goto fail;
85a2f9b4 3866
2b6080f2 3867 *iter->trace = *tr->current_trace;
d7350c3f 3868
79f55997 3869 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3870 goto fail;
3871
12883efb
SRRH
3872 iter->tr = tr;
3873
3874#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3875 /* Currently only the top directory has a snapshot */
3876 if (tr->current_trace->print_max || snapshot)
12883efb 3877 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3878 else
12883efb
SRRH
3879#endif
3880 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3881 iter->snapshot = snapshot;
bc0c38d1 3882 iter->pos = -1;
6484c71c 3883 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3884 mutex_init(&iter->mutex);
bc0c38d1 3885
8bba1bf5
MM
3886 /* Notify the tracer early; before we stop tracing. */
3887 if (iter->trace && iter->trace->open)
a93751ca 3888 iter->trace->open(iter);
8bba1bf5 3889
12ef7d44 3890 /* Annotate start of buffers if we had overruns */
12883efb 3891 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3892 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3893
8be0709f 3894 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3895 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3896 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3897
debdd57f
HT
3898 /* stop the trace while dumping if we are not opening "snapshot" */
3899 if (!iter->snapshot)
2b6080f2 3900 tracing_stop_tr(tr);
2f26ebd5 3901
ae3b5093 3902 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3903 for_each_tracing_cpu(cpu) {
b04cc6b1 3904 iter->buffer_iter[cpu] =
12883efb 3905 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3906 }
3907 ring_buffer_read_prepare_sync();
3908 for_each_tracing_cpu(cpu) {
3909 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3910 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3911 }
3912 } else {
3913 cpu = iter->cpu_file;
3928a8a2 3914 iter->buffer_iter[cpu] =
12883efb 3915 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3916 ring_buffer_read_prepare_sync();
3917 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3918 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3919 }
3920
bc0c38d1
SR
3921 mutex_unlock(&trace_types_lock);
3922
bc0c38d1 3923 return iter;
3928a8a2 3924
d7350c3f 3925 fail:
3928a8a2 3926 mutex_unlock(&trace_types_lock);
d7350c3f 3927 kfree(iter->trace);
6d158a81 3928 kfree(iter->buffer_iter);
93574fcc 3929release:
50e18b94
JO
3930 seq_release_private(inode, file);
3931 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3932}
3933
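/*
 * The fail/release labels above follow the usual kernel unwind idiom:
 * on error, release resources in reverse order of acquisition via
 * goto. A self-contained userspace illustration with hypothetical
 * resources:
 */
#include <stdlib.h>

struct widget { int *a; int *b; };

static struct widget *widget_open(void)
{
	struct widget *w = malloc(sizeof(*w));

	if (!w)
		return NULL;
	w->a = malloc(64);
	if (!w->a)
		goto free_w;
	w->b = malloc(64);
	if (!w->b)
		goto free_a;
	return w;		/* success: everything acquired */

free_a:
	free(w->a);		/* unwind in reverse order */
free_w:
	free(w);
	return NULL;
}

int main(void)
{
	struct widget *w = widget_open();

	if (w) {
		free(w->b);
		free(w->a);
		free(w);
	}
	return 0;
}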
3934int tracing_open_generic(struct inode *inode, struct file *filp)
3935{
60a11774
SR
3936 if (tracing_disabled)
3937 return -ENODEV;
3938
bc0c38d1
SR
3939 filp->private_data = inode->i_private;
3940 return 0;
3941}
3942
2e86421d
GB
3943bool tracing_is_disabled(void)
3944{
 3945 return tracing_disabled;
3946}
3947
7b85af63
SRRH
3948/*
3949 * Open and update trace_array ref count.
3950 * Must have the current trace_array passed to it.
3951 */
dcc30223 3952static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3953{
3954 struct trace_array *tr = inode->i_private;
3955
3956 if (tracing_disabled)
3957 return -ENODEV;
3958
3959 if (trace_array_get(tr) < 0)
3960 return -ENODEV;
3961
3962 filp->private_data = inode->i_private;
3963
3964 return 0;
7b85af63
SRRH
3965}
3966
4fd27358 3967static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3968{
6484c71c 3969 struct trace_array *tr = inode->i_private;
907f2784 3970 struct seq_file *m = file->private_data;
4acd4d00 3971 struct trace_iterator *iter;
3928a8a2 3972 int cpu;
bc0c38d1 3973
ff451961 3974 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3975 trace_array_put(tr);
4acd4d00 3976 return 0;
ff451961 3977 }
4acd4d00 3978
6484c71c 3979 /* Writes do not use seq_file */
4acd4d00 3980 iter = m->private;
bc0c38d1 3981 mutex_lock(&trace_types_lock);
a695cb58 3982
3928a8a2
SR
3983 for_each_tracing_cpu(cpu) {
3984 if (iter->buffer_iter[cpu])
3985 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3986 }
3987
bc0c38d1
SR
3988 if (iter->trace && iter->trace->close)
3989 iter->trace->close(iter);
3990
debdd57f
HT
3991 if (!iter->snapshot)
3992 /* reenable tracing if it was previously enabled */
2b6080f2 3993 tracing_start_tr(tr);
f77d09a3
AL
3994
3995 __trace_array_put(tr);
3996
bc0c38d1
SR
3997 mutex_unlock(&trace_types_lock);
3998
d7350c3f 3999 mutex_destroy(&iter->mutex);
b0dfa978 4000 free_cpumask_var(iter->started);
d7350c3f 4001 kfree(iter->trace);
6d158a81 4002 kfree(iter->buffer_iter);
50e18b94 4003 seq_release_private(inode, file);
ff451961 4004
bc0c38d1
SR
4005 return 0;
4006}
4007
7b85af63
SRRH
4008static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4009{
4010 struct trace_array *tr = inode->i_private;
4011
4012 trace_array_put(tr);
bc0c38d1
SR
4013 return 0;
4014}
4015
7b85af63
SRRH
4016static int tracing_single_release_tr(struct inode *inode, struct file *file)
4017{
4018 struct trace_array *tr = inode->i_private;
4019
4020 trace_array_put(tr);
4021
4022 return single_release(inode, file);
4023}
4024
bc0c38d1
SR
4025static int tracing_open(struct inode *inode, struct file *file)
4026{
6484c71c 4027 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
4028 struct trace_iterator *iter;
4029 int ret = 0;
bc0c38d1 4030
ff451961
SRRH
4031 if (trace_array_get(tr) < 0)
4032 return -ENODEV;
4033
4acd4d00 4034 /* If this file was open for write, then erase contents */
6484c71c
ON
4035 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4036 int cpu = tracing_get_cpu(inode);
8dd33bcb
BY
4037 struct trace_buffer *trace_buf = &tr->trace_buffer;
4038
4039#ifdef CONFIG_TRACER_MAX_TRACE
4040 if (tr->current_trace->print_max)
4041 trace_buf = &tr->max_buffer;
4042#endif
6484c71c
ON
4043
4044 if (cpu == RING_BUFFER_ALL_CPUS)
8dd33bcb 4045 tracing_reset_online_cpus(trace_buf);
4acd4d00 4046 else
8dd33bcb 4047 tracing_reset(trace_buf, cpu);
4acd4d00 4048 }
bc0c38d1 4049
4acd4d00 4050 if (file->f_mode & FMODE_READ) {
6484c71c 4051 iter = __tracing_open(inode, file, false);
4acd4d00
SR
4052 if (IS_ERR(iter))
4053 ret = PTR_ERR(iter);
983f938a 4054 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4acd4d00
SR
4055 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4056 }
ff451961
SRRH
4057
4058 if (ret < 0)
4059 trace_array_put(tr);
4060
bc0c38d1
SR
4061 return ret;
4062}
4063
607e2ea1
SRRH
4064/*
4065 * Some tracers are not suitable for instance buffers.
4066 * A tracer is always available for the global array (toplevel)
4067 * or if it explicitly states that it is.
4068 */
4069static bool
4070trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4071{
4072 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4073}
4074
4075/* Find the next tracer that this trace array may use */
4076static struct tracer *
4077get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4078{
4079 while (t && !trace_ok_for_array(t, tr))
4080 t = t->next;
4081
4082 return t;
4083}
4084
e309b41d 4085static void *
bc0c38d1
SR
4086t_next(struct seq_file *m, void *v, loff_t *pos)
4087{
607e2ea1 4088 struct trace_array *tr = m->private;
f129e965 4089 struct tracer *t = v;
bc0c38d1
SR
4090
4091 (*pos)++;
4092
4093 if (t)
607e2ea1 4094 t = get_tracer_for_array(tr, t->next);
bc0c38d1 4095
bc0c38d1
SR
4096 return t;
4097}
4098
4099static void *t_start(struct seq_file *m, loff_t *pos)
4100{
607e2ea1 4101 struct trace_array *tr = m->private;
f129e965 4102 struct tracer *t;
bc0c38d1
SR
4103 loff_t l = 0;
4104
4105 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
4106
4107 t = get_tracer_for_array(tr, trace_types);
4108 for (; t && l < *pos; t = t_next(m, t, &l))
4109 ;
bc0c38d1
SR
4110
4111 return t;
4112}
4113
4114static void t_stop(struct seq_file *m, void *p)
4115{
4116 mutex_unlock(&trace_types_lock);
4117}
4118
4119static int t_show(struct seq_file *m, void *v)
4120{
4121 struct tracer *t = v;
4122
4123 if (!t)
4124 return 0;
4125
fa6f0cc7 4126 seq_puts(m, t->name);
bc0c38d1
SR
4127 if (t->next)
4128 seq_putc(m, ' ');
4129 else
4130 seq_putc(m, '\n');
4131
4132 return 0;
4133}
4134
88e9d34c 4135static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
4136 .start = t_start,
4137 .next = t_next,
4138 .stop = t_stop,
4139 .show = t_show,
bc0c38d1
SR
4140};
4141
4142static int show_traces_open(struct inode *inode, struct file *file)
4143{
607e2ea1
SRRH
4144 struct trace_array *tr = inode->i_private;
4145 struct seq_file *m;
4146 int ret;
4147
60a11774
SR
4148 if (tracing_disabled)
4149 return -ENODEV;
4150
607e2ea1
SRRH
4151 ret = seq_open(file, &show_traces_seq_ops);
4152 if (ret)
4153 return ret;
4154
4155 m = file->private_data;
4156 m->private = tr;
4157
4158 return 0;
bc0c38d1
SR
4159}
4160
4acd4d00
SR
4161static ssize_t
4162tracing_write_stub(struct file *filp, const char __user *ubuf,
4163 size_t count, loff_t *ppos)
4164{
4165 return count;
4166}
4167
098c879e 4168loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 4169{
098c879e
SRRH
4170 int ret;
4171
364829b1 4172 if (file->f_mode & FMODE_READ)
098c879e 4173 ret = seq_lseek(file, offset, whence);
364829b1 4174 else
098c879e
SRRH
4175 file->f_pos = ret = 0;
4176
4177 return ret;
364829b1
SP
4178}
4179
5e2336a0 4180static const struct file_operations tracing_fops = {
4bf39a94
IM
4181 .open = tracing_open,
4182 .read = seq_read,
4acd4d00 4183 .write = tracing_write_stub,
098c879e 4184 .llseek = tracing_lseek,
4bf39a94 4185 .release = tracing_release,
bc0c38d1
SR
4186};
4187
5e2336a0 4188static const struct file_operations show_traces_fops = {
c7078de1
IM
4189 .open = show_traces_open,
4190 .read = seq_read,
4191 .release = seq_release,
b444786f 4192 .llseek = seq_lseek,
c7078de1
IM
4193};
4194
4195static ssize_t
4196tracing_cpumask_read(struct file *filp, char __user *ubuf,
4197 size_t count, loff_t *ppos)
4198{
ccfe9e42 4199 struct trace_array *tr = file_inode(filp)->i_private;
90e406f9 4200 char *mask_str;
36dfe925 4201 int len;
c7078de1 4202
90e406f9
CD
4203 len = snprintf(NULL, 0, "%*pb\n",
4204 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4205 mask_str = kmalloc(len, GFP_KERNEL);
4206 if (!mask_str)
4207 return -ENOMEM;
36dfe925 4208
90e406f9 4209 len = snprintf(mask_str, len, "%*pb\n",
1a40243b
TH
4210 cpumask_pr_args(tr->tracing_cpumask));
4211 if (len >= count) {
36dfe925
IM
4212 count = -EINVAL;
4213 goto out_err;
4214 }
90e406f9 4215 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
36dfe925
IM
4216
4217out_err:
90e406f9 4218 kfree(mask_str);
c7078de1
IM
4219
4220 return count;
4221}
4222
4223static ssize_t
4224tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4225 size_t count, loff_t *ppos)
4226{
ccfe9e42 4227 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 4228 cpumask_var_t tracing_cpumask_new;
2b6080f2 4229 int err, cpu;
9e01c1b7
RR
4230
4231 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4232 return -ENOMEM;
c7078de1 4233
9e01c1b7 4234 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 4235 if (err)
36dfe925
IM
4236 goto err_unlock;
4237
a5e25883 4238 local_irq_disable();
0b9b12c1 4239 arch_spin_lock(&tr->max_lock);
ab46428c 4240 for_each_tracing_cpu(cpu) {
36dfe925
IM
4241 /*
4242 * Increase/decrease the disabled counter if we are
4243 * about to flip a bit in the cpumask:
4244 */
ccfe9e42 4245 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4246 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4247 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4248 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 4249 }
ccfe9e42 4250 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4251 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4252 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4253 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
4254 }
4255 }
0b9b12c1 4256 arch_spin_unlock(&tr->max_lock);
a5e25883 4257 local_irq_enable();
36dfe925 4258
ccfe9e42 4259 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
9e01c1b7 4260 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
4261
4262 return count;
36dfe925
IM
4263
4264err_unlock:
215368e8 4265 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
4266
4267 return err;
c7078de1
IM
4268}
4269
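/*
 * Userspace sketch of driving the cpumask file above: the buffer is
 * parsed by cpumask_parse_user(), so a plain hex mask works. The
 * tracefs path is an assumption about where it is mounted.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/tracing_cpumask", O_WRONLY);

	if (fd < 0) {
		perror("tracing_cpumask");
		return 1;
	}
	if (write(fd, "3\n", 2) != 2)	/* bits 0-1: trace CPUs 0 and 1 only */
		perror("write");
	close(fd);
	return 0;
}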
5e2336a0 4270static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 4271 .open = tracing_open_generic_tr,
c7078de1
IM
4272 .read = tracing_cpumask_read,
4273 .write = tracing_cpumask_write,
ccfe9e42 4274 .release = tracing_release_generic_tr,
b444786f 4275 .llseek = generic_file_llseek,
bc0c38d1
SR
4276};
4277
fdb372ed 4278static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 4279{
d8e83d26 4280 struct tracer_opt *trace_opts;
2b6080f2 4281 struct trace_array *tr = m->private;
d8e83d26 4282 u32 tracer_flags;
d8e83d26 4283 int i;
adf9f195 4284
d8e83d26 4285 mutex_lock(&trace_types_lock);
2b6080f2
SR
4286 tracer_flags = tr->current_trace->flags->val;
4287 trace_opts = tr->current_trace->flags->opts;
d8e83d26 4288
bc0c38d1 4289 for (i = 0; trace_options[i]; i++) {
983f938a 4290 if (tr->trace_flags & (1 << i))
fdb372ed 4291 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 4292 else
fdb372ed 4293 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
4294 }
4295
adf9f195
FW
4296 for (i = 0; trace_opts[i].name; i++) {
4297 if (tracer_flags & trace_opts[i].bit)
fdb372ed 4298 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 4299 else
fdb372ed 4300 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 4301 }
d8e83d26 4302 mutex_unlock(&trace_types_lock);
adf9f195 4303
fdb372ed 4304 return 0;
bc0c38d1 4305}
bc0c38d1 4306
8c1a49ae 4307static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
4308 struct tracer_flags *tracer_flags,
4309 struct tracer_opt *opts, int neg)
4310{
d39cdd20 4311 struct tracer *trace = tracer_flags->trace;
8d18eaaf 4312 int ret;
bc0c38d1 4313
8c1a49ae 4314 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
4315 if (ret)
4316 return ret;
4317
4318 if (neg)
4319 tracer_flags->val &= ~opts->bit;
4320 else
4321 tracer_flags->val |= opts->bit;
4322 return 0;
bc0c38d1
SR
4323}
4324
adf9f195 4325/* Try to assign a tracer specific option */
8c1a49ae 4326static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 4327{
8c1a49ae 4328 struct tracer *trace = tr->current_trace;
7770841e 4329 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 4330 struct tracer_opt *opts = NULL;
8d18eaaf 4331 int i;
adf9f195 4332
7770841e
Z
4333 for (i = 0; tracer_flags->opts[i].name; i++) {
4334 opts = &tracer_flags->opts[i];
adf9f195 4335
8d18eaaf 4336 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 4337 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 4338 }
adf9f195 4339
8d18eaaf 4340 return -EINVAL;
adf9f195
FW
4341}
4342
613f04a0
SRRH
4343/* Some tracers require overwrite to stay enabled */
4344int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4345{
4346 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4347 return -1;
4348
4349 return 0;
4350}
4351
2b6080f2 4352int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
4353{
4354 /* do nothing if flag is already set */
983f938a 4355 if (!!(tr->trace_flags & mask) == !!enabled)
613f04a0
SRRH
4356 return 0;
4357
4358 /* Give the tracer a chance to approve the change */
2b6080f2 4359 if (tr->current_trace->flag_changed)
bf6065b5 4360 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 4361 return -EINVAL;
af4617bd
SR
4362
4363 if (enabled)
983f938a 4364 tr->trace_flags |= mask;
af4617bd 4365 else
983f938a 4366 tr->trace_flags &= ~mask;
e870e9a1
LZ
4367
4368 if (mask == TRACE_ITER_RECORD_CMD)
4369 trace_event_enable_cmd_record(enabled);
750912fa 4370
d914ba37
JF
4371 if (mask == TRACE_ITER_RECORD_TGID) {
4372 if (!tgid_map)
6396bb22
KC
4373 tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4374 sizeof(*tgid_map),
d914ba37
JF
4375 GFP_KERNEL);
4376 if (!tgid_map) {
4377 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4378 return -ENOMEM;
4379 }
4380
4381 trace_event_enable_tgid_record(enabled);
4382 }
4383
c37775d5
SR
4384 if (mask == TRACE_ITER_EVENT_FORK)
4385 trace_event_follow_fork(tr, enabled);
4386
1e10486f
NK
4387 if (mask == TRACE_ITER_FUNC_FORK)
4388 ftrace_pid_follow_fork(tr, enabled);
4389
80902822 4390 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 4391 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 4392#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 4393 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
4394#endif
4395 }
81698831 4396
b9f9108c 4397 if (mask == TRACE_ITER_PRINTK) {
81698831 4398 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
4399 trace_printk_control(enabled);
4400 }
613f04a0
SRRH
4401
4402 return 0;
af4617bd
SR
4403}
4404
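/*
 * The early return in set_tracer_flag() relies on !! to normalize a
 * multi-bit mask test and the caller's 'enabled' int to 0/1 before
 * comparing. A standalone demonstration of why the bare compare
 * would be wrong:
 */
#include <assert.h>

int main(void)
{
	unsigned long flags = 0x40;	/* flag bit already set */
	unsigned long mask  = 0x40;
	int enabled = 5;		/* any non-zero means "set it" */

	assert((flags & mask) != (unsigned long)enabled);	/* 0x40 != 5 */
	assert(!!(flags & mask) == !!enabled);			/* 1 == 1 */
	return 0;
}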
2b6080f2 4405static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 4406{
8d18eaaf 4407 char *cmp;
bc0c38d1 4408 int neg = 0;
591a033d 4409 int ret;
a4d1e688 4410 size_t orig_len = strlen(option);
3d739c1f 4411 int len;
bc0c38d1 4412
7bcfaf54 4413 cmp = strstrip(option);
bc0c38d1 4414
3d739c1f
SRV
4415 len = str_has_prefix(cmp, "no");
4416 if (len)
bc0c38d1 4417 neg = 1;
3d739c1f
SRV
4418
4419 cmp += len;
bc0c38d1 4420
69d34da2
SRRH
4421 mutex_lock(&trace_types_lock);
4422
591a033d 4423 ret = match_string(trace_options, -1, cmp);
adf9f195 4424 /* If no option could be set, test the specific tracer options */
591a033d 4425 if (ret < 0)
8c1a49ae 4426 ret = set_tracer_option(tr, cmp, neg);
591a033d
YX
4427 else
4428 ret = set_tracer_flag(tr, 1 << ret, !neg);
69d34da2
SRRH
4429
4430 mutex_unlock(&trace_types_lock);
bc0c38d1 4431
a4d1e688
JW
4432 /*
4433 * If the first trailing whitespace is replaced with '\0' by strstrip,
4434 * turn it back into a space.
4435 */
4436 if (orig_len > strlen(option))
4437 option[strlen(option)] = ' ';
4438
7bcfaf54
SR
4439 return ret;
4440}
4441
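/*
 * str_has_prefix() returns the prefix length on a match and 0
 * otherwise, which is why trace_set_options() above can use a single
 * call both to set 'neg' and to advance 'cmp' past "no". A userspace
 * equivalent (has_prefix() is a stand-in for the kernel helper):
 */
#include <assert.h>
#include <string.h>

static size_t has_prefix(const char *str, const char *prefix)
{
	size_t len = strlen(prefix);

	return strncmp(str, prefix, len) == 0 ? len : 0;
}

int main(void)
{
	const char *cmp = "noprint-parent";
	size_t len = has_prefix(cmp, "no");
	int neg = len ? 1 : 0;

	cmp += len;
	assert(neg == 1);
	assert(strcmp(cmp, "print-parent") == 0);
	return 0;
}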
a4d1e688
JW
4442static void __init apply_trace_boot_options(void)
4443{
4444 char *buf = trace_boot_options_buf;
4445 char *option;
4446
4447 while (true) {
4448 option = strsep(&buf, ",");
4449
4450 if (!option)
4451 break;
a4d1e688 4452
43ed3843
SRRH
4453 if (*option)
4454 trace_set_options(&global_trace, option);
a4d1e688
JW
4455
4456 /* Put back the comma to allow this to be called again */
4457 if (buf)
4458 *(buf - 1) = ',';
4459 }
4460}
4461
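/*
 * strsep() NUL-terminates each token in place, so the loop above
 * repairs the ',' after handling an option to leave the boot buffer
 * intact for any later pass. Standalone illustration:
 */
#include <assert.h>
#include <string.h>

int main(void)
{
	char opts[] = "sym-offset,noirq-info";
	char *buf = opts;
	char *option;

	while ((option = strsep(&buf, ",")) != NULL) {
		/* 'option' would be applied here */
		if (buf)		/* strsep split here: put the ',' back */
			*(buf - 1) = ',';
	}
	assert(strcmp(opts, "sym-offset,noirq-info") == 0);
	return 0;
}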
7bcfaf54
SR
4462static ssize_t
4463tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4464 size_t cnt, loff_t *ppos)
4465{
2b6080f2
SR
4466 struct seq_file *m = filp->private_data;
4467 struct trace_array *tr = m->private;
7bcfaf54 4468 char buf[64];
613f04a0 4469 int ret;
7bcfaf54
SR
4470
4471 if (cnt >= sizeof(buf))
4472 return -EINVAL;
4473
4afe6495 4474 if (copy_from_user(buf, ubuf, cnt))
7bcfaf54
SR
4475 return -EFAULT;
4476
a8dd2176
SR
4477 buf[cnt] = 0;
4478
2b6080f2 4479 ret = trace_set_options(tr, buf);
613f04a0
SRRH
4480 if (ret < 0)
4481 return ret;
7bcfaf54 4482
cf8517cf 4483 *ppos += cnt;
bc0c38d1
SR
4484
4485 return cnt;
4486}
4487
fdb372ed
LZ
4488static int tracing_trace_options_open(struct inode *inode, struct file *file)
4489{
7b85af63 4490 struct trace_array *tr = inode->i_private;
f77d09a3 4491 int ret;
7b85af63 4492
fdb372ed
LZ
4493 if (tracing_disabled)
4494 return -ENODEV;
2b6080f2 4495
7b85af63
SRRH
4496 if (trace_array_get(tr) < 0)
4497 return -ENODEV;
4498
f77d09a3
AL
4499 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4500 if (ret < 0)
4501 trace_array_put(tr);
4502
4503 return ret;
fdb372ed
LZ
4504}
4505
5e2336a0 4506static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
4507 .open = tracing_trace_options_open,
4508 .read = seq_read,
4509 .llseek = seq_lseek,
7b85af63 4510 .release = tracing_single_release_tr,
ee6bce52 4511 .write = tracing_trace_options_write,
bc0c38d1
SR
4512};
4513
7bd2f24c
IM
4514static const char readme_msg[] =
4515 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
4516 "# echo 0 > tracing_on : quick way to disable tracing\n"
4517 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4518 " Important files:\n"
4519 " trace\t\t\t- The static contents of the buffer\n"
4520 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4521 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4522 " current_tracer\t- function and latency tracers\n"
4523 " available_tracers\t- list of configured tracers for current_tracer\n"
4524 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4525 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4526 " trace_clock\t\t-change the clock used to order events\n"
4527 " local: Per cpu clock but may not be synced across CPUs\n"
4528 " global: Synced across CPUs but slows tracing down.\n"
4529 " counter: Not a clock, but just an increment\n"
4530 " uptime: Jiffy counter from time of boot\n"
4531 " perf: Same clock that perf events use\n"
4532#ifdef CONFIG_X86_64
4533 " x86-tsc: TSC cycle counter\n"
4534#endif
2c1ea60b
TZ
4535 "\n timestamp_mode\t-view the mode used to timestamp events\n"
4536 " delta: Delta difference against a buffer-wide timestamp\n"
4537 " absolute: Absolute (standalone) timestamp\n"
22f45649 4538 "\n trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
fa32e855 4539 "\n trace_marker_raw\t\t- Writes into this file insert binary data into the kernel buffer\n"
22f45649
SRRH
4540 " tracing_cpumask\t- Limit which CPUs to trace\n"
4541 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4542 "\t\t\t Remove sub-buffer with rmdir\n"
4543 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
4544 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4545 "\t\t\t option name\n"
939c7a4f 4546 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
4547#ifdef CONFIG_DYNAMIC_FTRACE
4548 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
4549 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4550 "\t\t\t functions\n"
60f1d5e3 4551 "\t accepts: func_full_name or glob-matching-pattern\n"
71485c45
SRRH
4552 "\t modules: Can select a group via module\n"
4553 "\t Format: :mod:<module-name>\n"
4554 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4555 "\t triggers: a command to perform when function is hit\n"
4556 "\t Format: <function>:<trigger>[:count]\n"
4557 "\t trigger: traceon, traceoff\n"
4558 "\t\t enable_event:<system>:<event>\n"
4559 "\t\t disable_event:<system>:<event>\n"
22f45649 4560#ifdef CONFIG_STACKTRACE
71485c45 4561 "\t\t stacktrace\n"
22f45649
SRRH
4562#endif
4563#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4564 "\t\t snapshot\n"
22f45649 4565#endif
17a280ea
SRRH
4566 "\t\t dump\n"
4567 "\t\t cpudump\n"
71485c45
SRRH
4568 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4569 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4570 "\t The first one will disable tracing every time do_fault is hit\n"
4571 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4572 "\t The first time do trap is hit and it disables tracing, the\n"
4573 "\t counter will decrement to 2. If tracing is already disabled,\n"
4574 "\t the counter will not decrement. It only decrements when the\n"
4575 "\t trigger did work\n"
4576 "\t To remove trigger without count:\n"
4577 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4578 "\t To remove trigger with a count:\n"
4579 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 4580 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
4581 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4582 "\t modules: Can select a group via module command :mod:\n"
4583 "\t Does not accept triggers\n"
22f45649
SRRH
4584#endif /* CONFIG_DYNAMIC_FTRACE */
4585#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
4586 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4587 "\t\t (function)\n"
22f45649
SRRH
4588#endif
4589#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4590 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 4591 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
4592 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4593#endif
4594#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
4595 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4596 "\t\t\t snapshot buffer. Read the contents for more\n"
4597 "\t\t\t information\n"
22f45649 4598#endif
991821c8 4599#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
4600 " stack_trace\t\t- Shows the max stack trace when active\n"
4601 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
4602 "\t\t\t Write into this file to reset the max size (trigger a\n"
4603 "\t\t\t new trace)\n"
22f45649 4604#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
4605 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4606 "\t\t\t traces\n"
22f45649 4607#endif
991821c8 4608#endif /* CONFIG_STACK_TRACER */
5448d44c
MH
4609#ifdef CONFIG_DYNAMIC_EVENTS
4610 " dynamic_events\t\t- Add/remove/show the generic dynamic events\n"
4611 "\t\t\t Write into this file to define/undefine new trace events.\n"
4612#endif
6b0b7551 4613#ifdef CONFIG_KPROBE_EVENTS
86425625
MH
4614 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4615 "\t\t\t Write into this file to define/undefine new trace events.\n"
4616#endif
6b0b7551 4617#ifdef CONFIG_UPROBE_EVENTS
86425625
MH
4618 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4619 "\t\t\t Write into this file to define/undefine new trace events.\n"
4620#endif
6b0b7551 4621#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
86425625 4622 "\t accepts: event-definitions (one definition per line)\n"
c3ca46ef
MH
4623 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4624 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
7bbab38d
MH
4625#ifdef CONFIG_HIST_TRIGGERS
4626 "\t s:[synthetic/]<event> <field> [<field>]\n"
4627#endif
86425625 4628 "\t -:[<group>/]<event>\n"
6b0b7551 4629#ifdef CONFIG_KPROBE_EVENTS
86425625 4630 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
35b6f55a 4631 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
86425625 4632#endif
6b0b7551 4633#ifdef CONFIG_UPROBE_EVENTS
1cc33161 4634 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
86425625
MH
4635#endif
4636 "\t args: <name>=fetcharg[:type]\n"
4637 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
a1303af5
MH
4638#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
4639 "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n"
4640#else
86425625 4641 "\t $stack<index>, $stack, $retval, $comm\n"
a1303af5 4642#endif
60c2e0ce 4643 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
40b53b77
MH
4644 "\t b<bit-width>@<bit-offset>/<container-size>,\n"
4645 "\t <type>\\[<array-size>\\]\n"
7bbab38d
MH
4646#ifdef CONFIG_HIST_TRIGGERS
4647 "\t field: <stype> <name>;\n"
4648 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
4649 "\t [unsigned] char/int/long\n"
4650#endif
86425625 4651#endif
26f25564
TZ
4652 " events/\t\t- Directory containing all trace event subsystems:\n"
4653 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4654 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
4655 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4656 "\t\t\t events\n"
26f25564 4657 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
4658 " events/<system>/<event>/\t- Directory containing control files for\n"
4659 "\t\t\t <event>:\n"
26f25564
TZ
4660 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4661 " filter\t\t- If set, only events passing filter are traced\n"
4662 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
4663 "\t Format: <trigger>[:count][if <filter>]\n"
4664 "\t trigger: traceon, traceoff\n"
4665 "\t enable_event:<system>:<event>\n"
4666 "\t disable_event:<system>:<event>\n"
d0bad49b
TZ
4667#ifdef CONFIG_HIST_TRIGGERS
4668 "\t enable_hist:<system>:<event>\n"
4669 "\t disable_hist:<system>:<event>\n"
4670#endif
26f25564 4671#ifdef CONFIG_STACKTRACE
71485c45 4672 "\t\t stacktrace\n"
26f25564
TZ
4673#endif
4674#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4675 "\t\t snapshot\n"
7ef224d1
TZ
4676#endif
4677#ifdef CONFIG_HIST_TRIGGERS
4678 "\t\t hist (see below)\n"
26f25564 4679#endif
71485c45
SRRH
4680 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4681 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4682 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4683 "\t events/block/block_unplug/trigger\n"
4684 "\t The first disables tracing every time block_unplug is hit.\n"
4685 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4686 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4687 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4688 "\t Like function triggers, the counter is only decremented if it\n"
4689 "\t enabled or disabled tracing.\n"
4690 "\t To remove a trigger without a count:\n"
4691 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4692 "\t To remove a trigger with a count:\n"
4693 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4694 "\t Filters can be ignored when removing a trigger.\n"
7ef224d1
TZ
4695#ifdef CONFIG_HIST_TRIGGERS
4696 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
76a3b0c8 4697 "\t Format: hist:keys=<field1[,field2,...]>\n"
f2606835 4698 "\t [:values=<field1[,field2,...]>]\n"
e62347d2 4699 "\t [:sort=<field1[,field2,...]>]\n"
7ef224d1 4700 "\t [:size=#entries]\n"
e86ae9ba 4701 "\t [:pause][:continue][:clear]\n"
5463bfda 4702 "\t [:name=histname1]\n"
7ef224d1
TZ
4703 "\t [if <filter>]\n\n"
4704 "\t When a matching event is hit, an entry is added to a hash\n"
f2606835
TZ
4705 "\t table using the key(s) and value(s) named, and the value of a\n"
4706 "\t sum called 'hitcount' is incremented. Keys and values\n"
4707 "\t correspond to fields in the event's format description. Keys\n"
69a0200c
TZ
4708 "\t can be any field, or the special string 'stacktrace'.\n"
4709 "\t Compound keys consisting of up to two fields can be specified\n"
4710 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4711 "\t fields. Sort keys consisting of up to two fields can be\n"
4712 "\t specified using the 'sort' keyword. The sort direction can\n"
4713 "\t be modified by appending '.descending' or '.ascending' to a\n"
4714 "\t sort field. The 'size' parameter can be used to specify more\n"
5463bfda
TZ
4715 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4716 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4717 "\t its histogram data will be shared with other triggers of the\n"
4718 "\t same name, and trigger hits will update this common data.\n\n"
7ef224d1 4719 "\t Reading the 'hist' file for the event will dump the hash\n"
52a7f16d
TZ
4720 "\t table in its entirety to stdout. If there are multiple hist\n"
4721 "\t triggers attached to an event, there will be a table for each\n"
5463bfda
TZ
4722 "\t trigger in the output. The table displayed for a named\n"
4723 "\t trigger will be the same as any other instance having the\n"
4724 "\t same name. The default format used to display a given field\n"
4725 "\t can be modified by appending any of the following modifiers\n"
4726 "\t to the field name, as applicable:\n\n"
c6afad49
TZ
4727 "\t .hex display a number as a hex value\n"
4728 "\t .sym display an address as a symbol\n"
6b4827ad 4729 "\t .sym-offset display an address as a symbol and offset\n"
31696198 4730 "\t .execname display a common_pid as a program name\n"
860f9f6b
TZ
4731 "\t .syscall display a syscall id as a syscall name\n"
4732 "\t .log2 display log2 value rather than raw number\n"
4733 "\t .usecs display a common_timestamp in microseconds\n\n"
83e99914
TZ
4734 "\t The 'pause' parameter can be used to pause an existing hist\n"
4735 "\t trigger or to start a hist trigger but not log any events\n"
4736 "\t until told to do so. 'continue' can be used to start or\n"
4737 "\t restart a paused hist trigger.\n\n"
e86ae9ba
TZ
4738 "\t The 'clear' parameter will clear the contents of a running\n"
4739 "\t hist trigger and leave its current paused/active state\n"
4740 "\t unchanged.\n\n"
d0bad49b
TZ
4741 "\t The enable_hist and disable_hist triggers can be used to\n"
4742 "\t have one event conditionally start and stop another event's\n"
4743 "\t already-attached hist trigger. The syntax is analagous to\n"
4744 "\t the enable_event and disable_event triggers.\n"
7ef224d1 4745#endif
7bd2f24c
IM
4746;
4747
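/*
 * Userspace sketch installing a hist trigger with the syntax the
 * HOWTO above documents. The event, its fields, and the tracefs
 * path are illustrative choices, not requirements.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/tracing/events/kmem/kmalloc/trigger";
	const char *cmd = "hist:keys=call_site:values=bytes_req\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("trigger");
	close(fd);
	return 0;
}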
4748static ssize_t
4749tracing_readme_read(struct file *filp, char __user *ubuf,
4750 size_t cnt, loff_t *ppos)
4751{
4752 return simple_read_from_buffer(ubuf, cnt, ppos,
4753 readme_msg, strlen(readme_msg));
4754}
4755
5e2336a0 4756static const struct file_operations tracing_readme_fops = {
c7078de1
IM
4757 .open = tracing_open_generic,
4758 .read = tracing_readme_read,
b444786f 4759 .llseek = generic_file_llseek,
7bd2f24c
IM
4760};
4761
99c621d7
MS
4762static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4763{
4764 int *ptr = v;
4765
4766 if (*pos || m->count)
4767 ptr++;
4768
4769 (*pos)++;
4770
4771 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4772 if (trace_find_tgid(*ptr))
4773 return ptr;
4774 }
4775
4776 return NULL;
4777}
4778
4779static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4780{
4781 void *v;
4782 loff_t l = 0;
4783
4784 if (!tgid_map)
4785 return NULL;
4786
4787 v = &tgid_map[0];
4788 while (l <= *pos) {
4789 v = saved_tgids_next(m, v, &l);
4790 if (!v)
4791 return NULL;
4792 }
4793
4794 return v;
4795}
4796
4797static void saved_tgids_stop(struct seq_file *m, void *v)
4798{
4799}
4800
4801static int saved_tgids_show(struct seq_file *m, void *v)
4802{
4803 int pid = (int *)v - tgid_map;
4804
4805 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4806 return 0;
4807}
4808
4809static const struct seq_operations tracing_saved_tgids_seq_ops = {
4810 .start = saved_tgids_start,
4811 .stop = saved_tgids_stop,
4812 .next = saved_tgids_next,
4813 .show = saved_tgids_show,
4814};
4815
4816static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4817{
4818 if (tracing_disabled)
4819 return -ENODEV;
4820
4821 return seq_open(filp, &tracing_saved_tgids_seq_ops);
4822}
4823
4824
4825static const struct file_operations tracing_saved_tgids_fops = {
4826 .open = tracing_saved_tgids_open,
4827 .read = seq_read,
4828 .llseek = seq_lseek,
4829 .release = seq_release,
4830};
4831
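/*
 * A simplified model of the contract the seq_operations above (and
 * tracer_seq_ops earlier) implement: the seq_file core calls start()
 * to position a cursor, alternates show()/next() until next() returns
 * NULL, then calls stop(). Standalone userspace sketch:
 */
#include <stdio.h>

struct mini_seq_ops {
	void *(*start)(long *pos);
	void *(*next)(void *v, long *pos);
	void  (*stop)(void *v);
	int   (*show)(void *v);
};

static int data[] = { 3, 1, 4 };

static void *arr_start(long *pos)
{
	return *pos < (long)(sizeof(data) / sizeof(data[0])) ?
	       &data[*pos] : NULL;
}

static void *arr_next(void *v, long *pos)
{
	(void)v;
	++*pos;
	return arr_start(pos);
}

static void arr_stop(void *v) { (void)v; }

static int arr_show(void *v)
{
	printf("%d\n", *(int *)v);
	return 0;
}

int main(void)
{
	struct mini_seq_ops ops = { arr_start, arr_next, arr_stop, arr_show };
	long pos = 0;
	void *v = ops.start(&pos);

	while (v && !ops.show(v))	/* show/next until exhausted */
		v = ops.next(v, &pos);
	ops.stop(v);
	return 0;
}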
42584c81
YY
4832static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4833{
4834 unsigned int *ptr = v;
69abe6a5 4835
42584c81
YY
4836 if (*pos || m->count)
4837 ptr++;
69abe6a5 4838
42584c81 4839 (*pos)++;
69abe6a5 4840
939c7a4f
YY
4841 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4842 ptr++) {
42584c81
YY
4843 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4844 continue;
69abe6a5 4845
42584c81
YY
4846 return ptr;
4847 }
69abe6a5 4848
42584c81
YY
4849 return NULL;
4850}
4851
4852static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4853{
4854 void *v;
4855 loff_t l = 0;
69abe6a5 4856
4c27e756
SRRH
4857 preempt_disable();
4858 arch_spin_lock(&trace_cmdline_lock);
4859
939c7a4f 4860 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
4861 while (l <= *pos) {
4862 v = saved_cmdlines_next(m, v, &l);
4863 if (!v)
4864 return NULL;
69abe6a5
AP
4865 }
4866
42584c81
YY
4867 return v;
4868}
4869
4870static void saved_cmdlines_stop(struct seq_file *m, void *v)
4871{
4c27e756
SRRH
4872 arch_spin_unlock(&trace_cmdline_lock);
4873 preempt_enable();
42584c81 4874}
69abe6a5 4875
42584c81
YY
4876static int saved_cmdlines_show(struct seq_file *m, void *v)
4877{
4878 char buf[TASK_COMM_LEN];
4879 unsigned int *pid = v;
69abe6a5 4880
4c27e756 4881 __trace_find_cmdline(*pid, buf);
42584c81
YY
4882 seq_printf(m, "%d %s\n", *pid, buf);
4883 return 0;
4884}
4885
4886static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4887 .start = saved_cmdlines_start,
4888 .next = saved_cmdlines_next,
4889 .stop = saved_cmdlines_stop,
4890 .show = saved_cmdlines_show,
4891};
4892
4893static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4894{
4895 if (tracing_disabled)
4896 return -ENODEV;
4897
4898 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
4899}
4900
4901static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
4902 .open = tracing_saved_cmdlines_open,
4903 .read = seq_read,
4904 .llseek = seq_lseek,
4905 .release = seq_release,
69abe6a5
AP
4906};
4907
939c7a4f
YY
4908static ssize_t
4909tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4910 size_t cnt, loff_t *ppos)
4911{
4912 char buf[64];
4913 int r;
4914
4915 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 4916 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
4917 arch_spin_unlock(&trace_cmdline_lock);
4918
4919 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4920}
4921
4922static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4923{
4924 kfree(s->saved_cmdlines);
4925 kfree(s->map_cmdline_to_pid);
4926 kfree(s);
4927}
4928
4929static int tracing_resize_saved_cmdlines(unsigned int val)
4930{
4931 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4932
a6af8fbf 4933 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
4934 if (!s)
4935 return -ENOMEM;
4936
4937 if (allocate_cmdlines_buffer(val, s) < 0) {
4938 kfree(s);
4939 return -ENOMEM;
4940 }
4941
4942 arch_spin_lock(&trace_cmdline_lock);
4943 savedcmd_temp = savedcmd;
4944 savedcmd = s;
4945 arch_spin_unlock(&trace_cmdline_lock);
4946 free_saved_cmdlines_buffer(savedcmd_temp);
4947
4948 return 0;
4949}
4950
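/*
 * The resize above allocates the replacement before taking the lock,
 * swaps the pointer inside the critical section, and frees the old
 * buffer only after unlocking. A userspace analogue of that pattern
 * (names hypothetical):
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_spinlock_t buf_lock;
static int *live_buf;

static int resize_buf(size_t n)
{
	int *nbuf = calloc(n, sizeof(*nbuf));	/* allocate outside lock */
	int *old;

	if (!nbuf)
		return -1;
	pthread_spin_lock(&buf_lock);
	old = live_buf;
	live_buf = nbuf;			/* swap under the lock */
	pthread_spin_unlock(&buf_lock);
	free(old);				/* free outside the lock */
	return 0;
}

int main(void)
{
	pthread_spin_init(&buf_lock, PTHREAD_PROCESS_PRIVATE);
	return resize_buf(128) ? 1 : 0;
}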
4951static ssize_t
4952tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4953 size_t cnt, loff_t *ppos)
4954{
4955 unsigned long val;
4956 int ret;
4957
4958 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4959 if (ret)
4960 return ret;
4961
 4962 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
4963 if (!val || val > PID_MAX_DEFAULT)
4964 return -EINVAL;
4965
4966 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4967 if (ret < 0)
4968 return ret;
4969
4970 *ppos += cnt;
4971
4972 return cnt;
4973}
4974
4975static const struct file_operations tracing_saved_cmdlines_size_fops = {
4976 .open = tracing_open_generic,
4977 .read = tracing_saved_cmdlines_size_read,
4978 .write = tracing_saved_cmdlines_size_write,
4979};
4980
681bec03 4981#ifdef CONFIG_TRACE_EVAL_MAP_FILE
23bf8cb8 4982static union trace_eval_map_item *
f57a4143 4983update_eval_map(union trace_eval_map_item *ptr)
9828413d 4984{
00f4b652 4985 if (!ptr->map.eval_string) {
9828413d
SRRH
4986 if (ptr->tail.next) {
4987 ptr = ptr->tail.next;
4988 /* Set ptr to the next real item (skip head) */
4989 ptr++;
4990 } else
4991 return NULL;
4992 }
4993 return ptr;
4994}
4995
f57a4143 4996static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
9828413d 4997{
23bf8cb8 4998 union trace_eval_map_item *ptr = v;
9828413d
SRRH
4999
5000 /*
5001 * Paranoid! If ptr points to end, we don't want to increment past it.
5002 * This really should never happen.
5003 */
f57a4143 5004 ptr = update_eval_map(ptr);
9828413d
SRRH
5005 if (WARN_ON_ONCE(!ptr))
5006 return NULL;
5007
5008 ptr++;
5009
5010 (*pos)++;
5011
f57a4143 5012 ptr = update_eval_map(ptr);
9828413d
SRRH
5013
5014 return ptr;
5015}
5016
f57a4143 5017static void *eval_map_start(struct seq_file *m, loff_t *pos)
9828413d 5018{
23bf8cb8 5019 union trace_eval_map_item *v;
9828413d
SRRH
5020 loff_t l = 0;
5021
1793ed93 5022 mutex_lock(&trace_eval_mutex);
9828413d 5023
23bf8cb8 5024 v = trace_eval_maps;
9828413d
SRRH
5025 if (v)
5026 v++;
5027
5028 while (v && l < *pos) {
f57a4143 5029 v = eval_map_next(m, v, &l);
9828413d
SRRH
5030 }
5031
5032 return v;
5033}
5034
f57a4143 5035static void eval_map_stop(struct seq_file *m, void *v)
9828413d 5036{
1793ed93 5037 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
5038}
5039
f57a4143 5040static int eval_map_show(struct seq_file *m, void *v)
9828413d 5041{
23bf8cb8 5042 union trace_eval_map_item *ptr = v;
9828413d
SRRH
5043
5044 seq_printf(m, "%s %ld (%s)\n",
00f4b652 5045 ptr->map.eval_string, ptr->map.eval_value,
9828413d
SRRH
5046 ptr->map.system);
5047
5048 return 0;
5049}
5050
f57a4143
JL
5051static const struct seq_operations tracing_eval_map_seq_ops = {
5052 .start = eval_map_start,
5053 .next = eval_map_next,
5054 .stop = eval_map_stop,
5055 .show = eval_map_show,
9828413d
SRRH
5056};
5057
f57a4143 5058static int tracing_eval_map_open(struct inode *inode, struct file *filp)
9828413d
SRRH
5059{
5060 if (tracing_disabled)
5061 return -ENODEV;
5062
f57a4143 5063 return seq_open(filp, &tracing_eval_map_seq_ops);
9828413d
SRRH
5064}
5065
f57a4143
JL
5066static const struct file_operations tracing_eval_map_fops = {
5067 .open = tracing_eval_map_open,
9828413d
SRRH
5068 .read = seq_read,
5069 .llseek = seq_lseek,
5070 .release = seq_release,
5071};
5072
23bf8cb8 5073static inline union trace_eval_map_item *
5f60b351 5074trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
9828413d
SRRH
5075{
5076 /* Return tail of array given the head */
5077 return ptr + ptr->head.length + 1;
5078}
5079
5080static void
f57a4143 5081trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
9828413d
SRRH
5082 int len)
5083{
00f4b652
JL
5084 struct trace_eval_map **stop;
5085 struct trace_eval_map **map;
23bf8cb8
JL
5086 union trace_eval_map_item *map_array;
5087 union trace_eval_map_item *ptr;
9828413d
SRRH
5088
5089 stop = start + len;
5090
5091 /*
23bf8cb8 5092 * The trace_eval_maps contains the map plus a head and tail item,
9828413d
SRRH
5093 * where the head holds the module and length of array, and the
5094 * tail holds a pointer to the next list.
5095 */
6da2ec56 5096 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
9828413d 5097 if (!map_array) {
f57a4143 5098 pr_warn("Unable to allocate trace eval mapping\n");
9828413d
SRRH
5099 return;
5100 }
5101
1793ed93 5102 mutex_lock(&trace_eval_mutex);
9828413d 5103
23bf8cb8
JL
5104 if (!trace_eval_maps)
5105 trace_eval_maps = map_array;
9828413d 5106 else {
23bf8cb8 5107 ptr = trace_eval_maps;
9828413d 5108 for (;;) {
5f60b351 5109 ptr = trace_eval_jmp_to_tail(ptr);
9828413d
SRRH
5110 if (!ptr->tail.next)
5111 break;
5112 ptr = ptr->tail.next;
5113
5114 }
5115 ptr->tail.next = map_array;
5116 }
5117 map_array->head.mod = mod;
5118 map_array->head.length = len;
5119 map_array++;
5120
5121 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5122 map_array->map = **map;
5123 map_array++;
5124 }
5125 memset(map_array, 0, sizeof(*map_array));
5126
1793ed93 5127 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
5128}
5129
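/*
 * Layout of each block linked into trace_eval_maps by the code above:
 *
 *   [ head | map 0 | map 1 | ... | map len-1 | tail ]
 *
 * head.length is what lets trace_eval_jmp_to_tail() hop a whole block
 * in one step (ptr + length + 1), and tail.next chains in the next
 * module's block. Simplified standalone model (demo_* names are
 * illustrative):
 */
struct demo_eval_map { const char *str; long val; };

union demo_eval_item {
	struct demo_eval_map map;
	struct { void *mod; int length; } head;
	struct { union demo_eval_item *next; } tail;
};

static union demo_eval_item *demo_jmp_to_tail(union demo_eval_item *ptr)
{
	return ptr + ptr->head.length + 1;	/* skip head plus all maps */
}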
f57a4143 5130static void trace_create_eval_file(struct dentry *d_tracer)
9828413d 5131{
681bec03 5132 trace_create_file("eval_map", 0444, d_tracer,
f57a4143 5133 NULL, &tracing_eval_map_fops);
9828413d
SRRH
5134}
5135
681bec03 5136#else /* CONFIG_TRACE_EVAL_MAP_FILE */
f57a4143
JL
5137static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5138static inline void trace_insert_eval_map_file(struct module *mod,
00f4b652 5139 struct trace_eval_map **start, int len) { }
681bec03 5140#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 5141
f57a4143 5142static void trace_insert_eval_map(struct module *mod,
00f4b652 5143 struct trace_eval_map **start, int len)
0c564a53 5144{
00f4b652 5145 struct trace_eval_map **map;
0c564a53
SRRH
5146
5147 if (len <= 0)
5148 return;
5149
5150 map = start;
5151
f57a4143 5152 trace_event_eval_update(map, len);
9828413d 5153
f57a4143 5154 trace_insert_eval_map_file(mod, start, len);
0c564a53
SRRH
5155}
5156
bc0c38d1
SR
5157static ssize_t
5158tracing_set_trace_read(struct file *filp, char __user *ubuf,
5159 size_t cnt, loff_t *ppos)
5160{
2b6080f2 5161 struct trace_array *tr = filp->private_data;
ee6c2c1b 5162 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
5163 int r;
5164
5165 mutex_lock(&trace_types_lock);
2b6080f2 5166 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
5167 mutex_unlock(&trace_types_lock);
5168
4bf39a94 5169 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
5170}
5171
b6f11df2
ACM
5172int tracer_init(struct tracer *t, struct trace_array *tr)
5173{
12883efb 5174 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
5175 return t->init(tr);
5176}
5177
12883efb 5178static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
5179{
5180 int cpu;
737223fb 5181
438ced17 5182 for_each_tracing_cpu(cpu)
12883efb 5183 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
5184}
5185
12883efb 5186#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 5187/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
5188static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5189 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
5190{
5191 int cpu, ret = 0;
5192
5193 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5194 for_each_tracing_cpu(cpu) {
12883efb
SRRH
5195 ret = ring_buffer_resize(trace_buf->buffer,
5196 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
5197 if (ret < 0)
5198 break;
12883efb
SRRH
5199 per_cpu_ptr(trace_buf->data, cpu)->entries =
5200 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
5201 }
5202 } else {
12883efb
SRRH
5203 ret = ring_buffer_resize(trace_buf->buffer,
5204 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 5205 if (ret == 0)
12883efb
SRRH
5206 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5207 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
5208 }
5209
5210 return ret;
5211}
12883efb 5212#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 5213
2b6080f2
SR
5214static int __tracing_resize_ring_buffer(struct trace_array *tr,
5215 unsigned long size, int cpu)
73c5162a
SR
5216{
5217 int ret;
5218
5219 /*
5220 * If kernel or user changes the size of the ring buffer
a123c52b
SR
5221 * we use the size that was given, and we can forget about
5222 * expanding it later.
73c5162a 5223 */
55034cd6 5224 ring_buffer_expanded = true;
73c5162a 5225
b382ede6 5226 /* May be called before buffers are initialized */
12883efb 5227 if (!tr->trace_buffer.buffer)
b382ede6
SR
5228 return 0;
5229
12883efb 5230 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
5231 if (ret < 0)
5232 return ret;
5233
12883efb 5234#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
5235 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5236 !tr->current_trace->use_max_tr)
ef710e10
KM
5237 goto out;
5238
12883efb 5239 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 5240 if (ret < 0) {
12883efb
SRRH
5241 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5242 &tr->trace_buffer, cpu);
73c5162a 5243 if (r < 0) {
a123c52b
SR
5244 /*
5245 * AARGH! We are left with a different-
5246 * sized max buffer!
5247 * The max buffer is our "snapshot" buffer.
5248 * When a tracer needs a snapshot (one of the
5249 * latency tracers), it swaps the max buffer
5250 * with the saved snapshot. We succeeded in
5251 * updating the size of the main buffer, but failed
5252 * to update the size of the max buffer. And when we
5253 * tried to reset the main buffer to the original size,
5254 * we failed there too. This is very unlikely to
5255 * happen, but if it does, warn and kill all
5256 * tracing.
5257 */
73c5162a
SR
5258 WARN_ON(1);
5259 tracing_disabled = 1;
5260 }
5261 return ret;
5262 }
5263
438ced17 5264 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5265 set_buffer_entries(&tr->max_buffer, size);
438ced17 5266 else
12883efb 5267 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 5268
ef710e10 5269 out:
12883efb
SRRH
5270#endif /* CONFIG_TRACER_MAX_TRACE */
5271
438ced17 5272 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5273 set_buffer_entries(&tr->trace_buffer, size);
438ced17 5274 else
12883efb 5275 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
5276
5277 return ret;
5278}
5279
2b6080f2
SR
5280static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5281 unsigned long size, int cpu_id)
4f271a2a 5282{
83f40318 5283 int ret = size;
4f271a2a
VN
5284
5285 mutex_lock(&trace_types_lock);
5286
438ced17
VN
5287 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5288 /* make sure, this cpu is enabled in the mask */
5289 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5290 ret = -EINVAL;
5291 goto out;
5292 }
5293 }
4f271a2a 5294
2b6080f2 5295 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
5296 if (ret < 0)
5297 ret = -ENOMEM;
5298
438ced17 5299out:
4f271a2a
VN
5300 mutex_unlock(&trace_types_lock);
5301
5302 return ret;
5303}
5304
ef710e10 5305
1852fcce
SR
5306/**
5307 * tracing_update_buffers - used by tracing facility to expand ring buffers
5308 *
5309 * To save memory when tracing is never used on a system that has it
5310 * configured in, the ring buffers are set to a minimum size. But once
5311 * a user starts to use the tracing facility, they need to grow
5312 * to their default size.
5313 *
5314 * This function is to be called when a tracer is about to be used.
5315 */
5316int tracing_update_buffers(void)
5317{
5318 int ret = 0;
5319
1027fcb2 5320 mutex_lock(&trace_types_lock);
1852fcce 5321 if (!ring_buffer_expanded)
2b6080f2 5322 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 5323 RING_BUFFER_ALL_CPUS);
1027fcb2 5324 mutex_unlock(&trace_types_lock);
1852fcce
SR
5325
5326 return ret;
5327}
5328
577b785f
SR
5329struct trace_option_dentry;
5330
37aea98b 5331static void
2b6080f2 5332create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 5333
6b450d25
SRRH
5334/*
5335 * Used to clear out the tracer before deletion of an instance.
5336 * Must have trace_types_lock held.
5337 */
5338static void tracing_set_nop(struct trace_array *tr)
5339{
5340 if (tr->current_trace == &nop_trace)
5341 return;
5342
50512ab5 5343 tr->current_trace->enabled--;
6b450d25
SRRH
5344
5345 if (tr->current_trace->reset)
5346 tr->current_trace->reset(tr);
5347
5348 tr->current_trace = &nop_trace;
5349}
5350
41d9c0be 5351static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 5352{
09d23a1d
SRRH
5353 /* Only enable if the directory has been created already. */
5354 if (!tr->dir)
5355 return;
5356
37aea98b 5357 create_trace_option_files(tr, t);
09d23a1d
SRRH
5358}
5359
5360static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5361{
bc0c38d1 5362 struct tracer *t;
12883efb 5363#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5364 bool had_max_tr;
12883efb 5365#endif
d9e54076 5366 int ret = 0;
bc0c38d1 5367
1027fcb2
SR
5368 mutex_lock(&trace_types_lock);
5369
73c5162a 5370 if (!ring_buffer_expanded) {
2b6080f2 5371 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 5372 RING_BUFFER_ALL_CPUS);
73c5162a 5373 if (ret < 0)
59f586db 5374 goto out;
73c5162a
SR
5375 ret = 0;
5376 }
5377
bc0c38d1
SR
5378 for (t = trace_types; t; t = t->next) {
5379 if (strcmp(t->name, buf) == 0)
5380 break;
5381 }
c2931e05
FW
5382 if (!t) {
5383 ret = -EINVAL;
5384 goto out;
5385 }
2b6080f2 5386 if (t == tr->current_trace)
bc0c38d1
SR
5387 goto out;
5388
c7b3ae0b
ZSZ
5389 /* Some tracers won't work on kernel command line */
5390 if (system_state < SYSTEM_RUNNING && t->noboot) {
5391 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5392 t->name);
5393 goto out;
5394 }
5395
607e2ea1
SRRH
5396 /* Some tracers are only allowed for the top level buffer */
5397 if (!trace_ok_for_array(t, tr)) {
5398 ret = -EINVAL;
5399 goto out;
5400 }
5401
cf6ab6d9
SRRH
5402 /* If trace pipe files are being read, we can't change the tracer */
5403 if (tr->current_trace->ref) {
5404 ret = -EBUSY;
5405 goto out;
5406 }
5407
9f029e83 5408 trace_branch_disable();
613f04a0 5409
50512ab5 5410 tr->current_trace->enabled--;
613f04a0 5411
2b6080f2
SR
5412 if (tr->current_trace->reset)
5413 tr->current_trace->reset(tr);
34600f0e 5414
74401729 5415 /* Current trace needs to be nop_trace before synchronize_rcu */
2b6080f2 5416 tr->current_trace = &nop_trace;
34600f0e 5417
45ad21ca
SRRH
5418#ifdef CONFIG_TRACER_MAX_TRACE
5419 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
5420
5421 if (had_max_tr && !t->use_max_tr) {
5422 /*
5423 * We need to make sure that the update_max_tr sees that
5424 * current_trace changed to nop_trace to keep it from
5425 * swapping the buffers after we resize it.
5426 * update_max_tr() is called with interrupts disabled,
5427 * so a synchronize_rcu() is sufficient.
5428 */
74401729 5429 synchronize_rcu();
3209cff4 5430 free_snapshot(tr);
ef710e10 5431 }
12883efb 5432#endif
12883efb
SRRH
5433
5434#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5435 if (t->use_max_tr && !had_max_tr) {
2824f503 5436 ret = tracing_alloc_snapshot_instance(tr);
d60da506
HT
5437 if (ret < 0)
5438 goto out;
ef710e10 5439 }
12883efb 5440#endif
577b785f 5441
1c80025a 5442 if (t->init) {
b6f11df2 5443 ret = tracer_init(t, tr);
1c80025a
FW
5444 if (ret)
5445 goto out;
5446 }
bc0c38d1 5447
2b6080f2 5448 tr->current_trace = t;
50512ab5 5449 tr->current_trace->enabled++;
9f029e83 5450 trace_branch_enable(tr);
bc0c38d1
SR
5451 out:
5452 mutex_unlock(&trace_types_lock);
5453
d9e54076
PZ
5454 return ret;
5455}
5456
5457static ssize_t
5458tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5459 size_t cnt, loff_t *ppos)
5460{
607e2ea1 5461 struct trace_array *tr = filp->private_data;
ee6c2c1b 5462 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
5463 int i;
5464 size_t ret;
e6e7a65a
FW
5465 int err;
5466
5467 ret = cnt;
d9e54076 5468
ee6c2c1b
LZ
5469 if (cnt > MAX_TRACER_SIZE)
5470 cnt = MAX_TRACER_SIZE;
d9e54076 5471
4afe6495 5472 if (copy_from_user(buf, ubuf, cnt))
d9e54076
PZ
5473 return -EFAULT;
5474
5475 buf[cnt] = 0;
5476
5477 /* strip trailing whitespace. */
5478 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5479 buf[i] = 0;
5480
607e2ea1 5481 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
5482 if (err)
5483 return err;
d9e54076 5484
cf8517cf 5485 *ppos += ret;
bc0c38d1 5486
c2931e05 5487 return ret;
bc0c38d1
SR
5488}
5489
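
The pair above implements the tracefs "current_tracer" file. A minimal user-space sketch of the round trip, assuming tracefs is mounted at /sys/kernel/tracing (older setups use /sys/kernel/debug/tracing) and root privileges:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/tracing/current_tracer";
	char buf[64];
	ssize_t n;
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* tracing_set_trace_write() strips trailing whitespace. */
	if (write(fd, "nop\n", 4) != 4)
		perror("write");	/* -EINVAL for an unknown tracer */
	if (lseek(fd, 0, SEEK_SET) == 0 &&
	    (n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		printf("current tracer: %s", buf);	/* name plus '\n' */
	}
	close(fd);
	return 0;
}

Writing a name that matches no registered tracer makes tracing_set_tracer() fail with -EINVAL; writing while trace_pipe readers exist fails with -EBUSY, as guarded above.
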
5490static ssize_t
6508fa76
SF
5491tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5492 size_t cnt, loff_t *ppos)
bc0c38d1 5493{
bc0c38d1
SR
5494 char buf[64];
5495 int r;
5496
cffae437 5497 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 5498 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
5499 if (r > sizeof(buf))
5500 r = sizeof(buf);
4bf39a94 5501 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
5502}
5503
5504static ssize_t
6508fa76
SF
5505tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5506 size_t cnt, loff_t *ppos)
bc0c38d1 5507{
5e39841c 5508 unsigned long val;
c6caeeb1 5509 int ret;
bc0c38d1 5510
22fe9b54
PH
5511 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5512 if (ret)
c6caeeb1 5513 return ret;
bc0c38d1
SR
5514
5515 *ptr = val * 1000;
5516
5517 return cnt;
5518}
5519
6508fa76
SF
5520static ssize_t
5521tracing_thresh_read(struct file *filp, char __user *ubuf,
5522 size_t cnt, loff_t *ppos)
5523{
5524 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5525}
5526
5527static ssize_t
5528tracing_thresh_write(struct file *filp, const char __user *ubuf,
5529 size_t cnt, loff_t *ppos)
5530{
5531 struct trace_array *tr = filp->private_data;
5532 int ret;
5533
5534 mutex_lock(&trace_types_lock);
5535 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5536 if (ret < 0)
5537 goto out;
5538
5539 if (tr->current_trace->update_thresh) {
5540 ret = tr->current_trace->update_thresh(tr);
5541 if (ret < 0)
5542 goto out;
5543 }
5544
5545 ret = cnt;
5546out:
5547 mutex_unlock(&trace_types_lock);
5548
5549 return ret;
5550}
5551
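
"tracing_thresh" takes a value in microseconds from user space and stores it in nanoseconds via tracing_nsecs_write(). A short sketch, with the tracefs mount point again an assumption:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/tracing_thresh", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* 100 us: latency tracers now record only sections slower than this. */
	fprintf(f, "100\n");
	return fclose(f) ? 1 : 0;
}
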
f971cc9a 5552#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
e428abbb 5553
6508fa76
SF
5554static ssize_t
5555tracing_max_lat_read(struct file *filp, char __user *ubuf,
5556 size_t cnt, loff_t *ppos)
5557{
5558 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5559}
5560
5561static ssize_t
5562tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5563 size_t cnt, loff_t *ppos)
5564{
5565 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5566}
5567
e428abbb
CG
5568#endif
5569
b3806b43
SR
5570static int tracing_open_pipe(struct inode *inode, struct file *filp)
5571{
15544209 5572 struct trace_array *tr = inode->i_private;
b3806b43 5573 struct trace_iterator *iter;
b04cc6b1 5574 int ret = 0;
b3806b43
SR
5575
5576 if (tracing_disabled)
5577 return -ENODEV;
5578
7b85af63
SRRH
5579 if (trace_array_get(tr) < 0)
5580 return -ENODEV;
5581
b04cc6b1
FW
5582 mutex_lock(&trace_types_lock);
5583
b3806b43
SR
5584 /* create a buffer to store the information to pass to userspace */
5585 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
5586 if (!iter) {
5587 ret = -ENOMEM;
f77d09a3 5588 __trace_array_put(tr);
b04cc6b1
FW
5589 goto out;
5590 }
b3806b43 5591
3a161d99 5592 trace_seq_init(&iter->seq);
d716ff71 5593 iter->trace = tr->current_trace;
d7350c3f 5594
4462344e 5595 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 5596 ret = -ENOMEM;
d7350c3f 5597 goto fail;
4462344e
RR
5598 }
5599
a309720c 5600 /* trace pipe does not show start of buffer */
4462344e 5601 cpumask_setall(iter->started);
a309720c 5602
983f938a 5603 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
112f38a7
SR
5604 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5605
8be0709f 5606 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 5607 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
5608 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5609
15544209
ON
5610 iter->tr = tr;
5611 iter->trace_buffer = &tr->trace_buffer;
5612 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 5613 mutex_init(&iter->mutex);
b3806b43
SR
5614 filp->private_data = iter;
5615
107bad8b
SR
5616 if (iter->trace->pipe_open)
5617 iter->trace->pipe_open(iter);
107bad8b 5618
b444786f 5619 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
5620
5621 tr->current_trace->ref++;
b04cc6b1
FW
5622out:
5623 mutex_unlock(&trace_types_lock);
5624 return ret;
d7350c3f
FW
5625
5626fail:
d7350c3f 5627 kfree(iter);
7b85af63 5628 __trace_array_put(tr);
d7350c3f
FW
5629 mutex_unlock(&trace_types_lock);
5630 return ret;
b3806b43
SR
5631}
5632
5633static int tracing_release_pipe(struct inode *inode, struct file *file)
5634{
5635 struct trace_iterator *iter = file->private_data;
15544209 5636 struct trace_array *tr = inode->i_private;
b3806b43 5637
b04cc6b1
FW
5638 mutex_lock(&trace_types_lock);
5639
cf6ab6d9
SRRH
5640 tr->current_trace->ref--;
5641
29bf4a5e 5642 if (iter->trace->pipe_close)
c521efd1
SR
5643 iter->trace->pipe_close(iter);
5644
b04cc6b1
FW
5645 mutex_unlock(&trace_types_lock);
5646
4462344e 5647 free_cpumask_var(iter->started);
d7350c3f 5648 mutex_destroy(&iter->mutex);
b3806b43 5649 kfree(iter);
b3806b43 5650
7b85af63
SRRH
5651 trace_array_put(tr);
5652
b3806b43
SR
5653 return 0;
5654}
5655
9dd95748 5656static __poll_t
cc60cdc9 5657trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 5658{
983f938a
SRRH
5659 struct trace_array *tr = iter->tr;
5660
15693458
SRRH
5661 /* Iterators are static, they should be filled or empty */
5662 if (trace_buffer_iter(iter, iter->cpu_file))
a9a08845 5663 return EPOLLIN | EPOLLRDNORM;
2a2cc8f7 5664
983f938a 5665 if (tr->trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
5666 /*
5667 * Always select as readable when in blocking mode
5668 */
a9a08845 5669 return EPOLLIN | EPOLLRDNORM;
15693458 5670 else
12883efb 5671 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 5672 filp, poll_table);
2a2cc8f7 5673}
2a2cc8f7 5674
9dd95748 5675static __poll_t
cc60cdc9
SR
5676tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5677{
5678 struct trace_iterator *iter = filp->private_data;
5679
5680 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
5681}
5682
d716ff71 5683/* Must be called with iter->mutex held. */
ff98781b 5684static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
5685{
5686 struct trace_iterator *iter = filp->private_data;
8b8b3683 5687 int ret;
b3806b43 5688
b3806b43 5689 while (trace_empty(iter)) {
2dc8f095 5690
107bad8b 5691 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 5692 return -EAGAIN;
107bad8b 5693 }
2dc8f095 5694
b3806b43 5695 /*
250bfd3d 5696 * We block until we read something and tracing is disabled.
b3806b43
SR
5697 * We still block if tracing is disabled, but we have never
5698 * read anything. This allows a user to cat this file, and
5699 * then enable tracing. But after we have read something,
5700 * we give an EOF when tracing is again disabled.
5701 *
5702 * iter->pos will be 0 if we haven't read anything.
5703 */
75df6e68 5704 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
b3806b43 5705 break;
f4874261
SRRH
5706
5707 mutex_unlock(&iter->mutex);
5708
2c2b0a78 5709 ret = wait_on_pipe(iter, 0);
f4874261
SRRH
5710
5711 mutex_lock(&iter->mutex);
5712
8b8b3683
SRRH
5713 if (ret)
5714 return ret;
b3806b43
SR
5715 }
5716
ff98781b
EGM
5717 return 1;
5718}
5719
5720/*
5721 * Consumer reader.
5722 */
5723static ssize_t
5724tracing_read_pipe(struct file *filp, char __user *ubuf,
5725 size_t cnt, loff_t *ppos)
5726{
5727 struct trace_iterator *iter = filp->private_data;
5728 ssize_t sret;
5729
d7350c3f
FW
5730 /*
5731 * Avoid more than one consumer on a single file descriptor.
5732 * This is just a matter of trace coherency; the ring buffer itself
5733 * is protected.
5734 */
5735 mutex_lock(&iter->mutex);
1245800c
SRRH
5736
5737 /* return any leftover data */
5738 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5739 if (sret != -EBUSY)
5740 goto out;
5741
5742 trace_seq_init(&iter->seq);
5743
ff98781b
EGM
5744 if (iter->trace->read) {
5745 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5746 if (sret)
5747 goto out;
5748 }
5749
5750waitagain:
5751 sret = tracing_wait_pipe(filp);
5752 if (sret <= 0)
5753 goto out;
5754
b3806b43 5755 /* stop when tracing is finished */
ff98781b
EGM
5756 if (trace_empty(iter)) {
5757 sret = 0;
107bad8b 5758 goto out;
ff98781b 5759 }
b3806b43
SR
5760
5761 if (cnt >= PAGE_SIZE)
5762 cnt = PAGE_SIZE - 1;
5763
53d0aa77 5764 /* reset all but tr, trace, and overruns */
53d0aa77
SR
5765 memset(&iter->seq, 0,
5766 sizeof(struct trace_iterator) -
5767 offsetof(struct trace_iterator, seq));
ed5467da 5768 cpumask_clear(iter->started);
4823ed7e 5769 iter->pos = -1;
b3806b43 5770
4f535968 5771 trace_event_read_lock();
7e53bd42 5772 trace_access_lock(iter->cpu_file);
955b61e5 5773 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 5774 enum print_line_t ret;
5ac48378 5775 int save_len = iter->seq.seq.len;
088b1e42 5776
f9896bf3 5777 ret = print_trace_line(iter);
2c4f035f 5778 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 5779 /* don't print partial lines */
5ac48378 5780 iter->seq.seq.len = save_len;
b3806b43 5781 break;
088b1e42 5782 }
b91facc3
FW
5783 if (ret != TRACE_TYPE_NO_CONSUME)
5784 trace_consume(iter);
b3806b43 5785
5ac48378 5786 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 5787 break;
ee5e51f5
JO
5788
5789 /*
5790 * Setting the full flag means we reached the trace_seq buffer
5791 * size and we should leave by partial output condition above.
5792 * One of the trace_seq_* functions is not used properly.
5793 */
5794 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5795 iter->ent->type);
b3806b43 5796 }
7e53bd42 5797 trace_access_unlock(iter->cpu_file);
4f535968 5798 trace_event_read_unlock();
b3806b43 5799
b3806b43 5800 /* Now copy what we have to the user */
6c6c2796 5801 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 5802 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 5803 trace_seq_init(&iter->seq);
9ff4b974
PP
5804
5805 /*
25985edc 5806 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
5807 * entries, go back to wait for more entries.
5808 */
6c6c2796 5809 if (sret == -EBUSY)
9ff4b974 5810 goto waitagain;
b3806b43 5811
107bad8b 5812out:
d7350c3f 5813 mutex_unlock(&iter->mutex);
107bad8b 5814
6c6c2796 5815 return sret;
b3806b43
SR
5816}
5817
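
tracing_read_pipe() is the blocking, consuming read path behind the tracefs "trace_pipe" file. A minimal consumer sketch (path assumed as before); note that, unlike the "trace" file, events read here are removed from the ring buffer, so concurrent readers see disjoint streams:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/trace_pipe", "r");
	char line[4096];

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Each fgets() may block in tracing_wait_pipe() until data arrives. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
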
3c56819b
EGM
5818static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5819 unsigned int idx)
5820{
5821 __free_page(spd->pages[idx]);
5822}
5823
28dfef8f 5824static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 5825 .can_merge = 0,
34cd4998 5826 .confirm = generic_pipe_buf_confirm,
92fdd98c 5827 .release = generic_pipe_buf_release,
34cd4998
SR
5828 .steal = generic_pipe_buf_steal,
5829 .get = generic_pipe_buf_get,
3c56819b
EGM
5830};
5831
34cd4998 5832static size_t
fa7c7f6e 5833tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
5834{
5835 size_t count;
74f06bb7 5836 int save_len;
34cd4998
SR
5837 int ret;
5838
5839 /* Seq buffer is page-sized, exactly what we need. */
5840 for (;;) {
74f06bb7 5841 save_len = iter->seq.seq.len;
34cd4998 5842 ret = print_trace_line(iter);
74f06bb7
SRRH
5843
5844 if (trace_seq_has_overflowed(&iter->seq)) {
5845 iter->seq.seq.len = save_len;
34cd4998
SR
5846 break;
5847 }
74f06bb7
SRRH
5848
5849 /*
5850 * This should not be hit, because TRACE_TYPE_PARTIAL_LINE
5851 * should only be returned if iter->seq overflowed. But
5852 * check it anyway to be safe.
5853 */
34cd4998 5854 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
5855 iter->seq.seq.len = save_len;
5856 break;
5857 }
5858
5ac48378 5859 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
5860 if (rem < count) {
5861 rem = 0;
5862 iter->seq.seq.len = save_len;
34cd4998
SR
5863 break;
5864 }
5865
74e7ff8c
LJ
5866 if (ret != TRACE_TYPE_NO_CONSUME)
5867 trace_consume(iter);
34cd4998 5868 rem -= count;
955b61e5 5869 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
5870 rem = 0;
5871 iter->ent = NULL;
5872 break;
5873 }
5874 }
5875
5876 return rem;
5877}
5878
3c56819b
EGM
5879static ssize_t tracing_splice_read_pipe(struct file *filp,
5880 loff_t *ppos,
5881 struct pipe_inode_info *pipe,
5882 size_t len,
5883 unsigned int flags)
5884{
35f3d14d
JA
5885 struct page *pages_def[PIPE_DEF_BUFFERS];
5886 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
5887 struct trace_iterator *iter = filp->private_data;
5888 struct splice_pipe_desc spd = {
35f3d14d
JA
5889 .pages = pages_def,
5890 .partial = partial_def,
34cd4998 5891 .nr_pages = 0, /* This gets updated below. */
047fe360 5892 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
5893 .ops = &tracing_pipe_buf_ops,
5894 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
5895 };
5896 ssize_t ret;
34cd4998 5897 size_t rem;
3c56819b
EGM
5898 unsigned int i;
5899
35f3d14d
JA
5900 if (splice_grow_spd(pipe, &spd))
5901 return -ENOMEM;
5902
d7350c3f 5903 mutex_lock(&iter->mutex);
3c56819b
EGM
5904
5905 if (iter->trace->splice_read) {
5906 ret = iter->trace->splice_read(iter, filp,
5907 ppos, pipe, len, flags);
5908 if (ret)
34cd4998 5909 goto out_err;
3c56819b
EGM
5910 }
5911
5912 ret = tracing_wait_pipe(filp);
5913 if (ret <= 0)
34cd4998 5914 goto out_err;
3c56819b 5915
955b61e5 5916 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 5917 ret = -EFAULT;
34cd4998 5918 goto out_err;
3c56819b
EGM
5919 }
5920
4f535968 5921 trace_event_read_lock();
7e53bd42 5922 trace_access_lock(iter->cpu_file);
4f535968 5923
3c56819b 5924 /* Fill as many pages as possible. */
a786c06d 5925 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
5926 spd.pages[i] = alloc_page(GFP_KERNEL);
5927 if (!spd.pages[i])
34cd4998 5928 break;
3c56819b 5929
fa7c7f6e 5930 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
5931
5932 /* Copy the data into the page, so we can start over. */
5933 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 5934 page_address(spd.pages[i]),
5ac48378 5935 trace_seq_used(&iter->seq));
3c56819b 5936 if (ret < 0) {
35f3d14d 5937 __free_page(spd.pages[i]);
3c56819b
EGM
5938 break;
5939 }
35f3d14d 5940 spd.partial[i].offset = 0;
5ac48378 5941 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 5942
f9520750 5943 trace_seq_init(&iter->seq);
3c56819b
EGM
5944 }
5945
7e53bd42 5946 trace_access_unlock(iter->cpu_file);
4f535968 5947 trace_event_read_unlock();
d7350c3f 5948 mutex_unlock(&iter->mutex);
3c56819b
EGM
5949
5950 spd.nr_pages = i;
5951
a29054d9
SRRH
5952 if (i)
5953 ret = splice_to_pipe(pipe, &spd);
5954 else
5955 ret = 0;
35f3d14d 5956out:
047fe360 5957 splice_shrink_spd(&spd);
35f3d14d 5958 return ret;
3c56819b 5959
34cd4998 5960out_err:
d7350c3f 5961 mutex_unlock(&iter->mutex);
35f3d14d 5962 goto out;
3c56819b
EGM
5963}
5964
a98a3c3f
SR
5965static ssize_t
5966tracing_entries_read(struct file *filp, char __user *ubuf,
5967 size_t cnt, loff_t *ppos)
5968{
0bc392ee
ON
5969 struct inode *inode = file_inode(filp);
5970 struct trace_array *tr = inode->i_private;
5971 int cpu = tracing_get_cpu(inode);
438ced17
VN
5972 char buf[64];
5973 int r = 0;
5974 ssize_t ret;
a98a3c3f 5975
db526ca3 5976 mutex_lock(&trace_types_lock);
438ced17 5977
0bc392ee 5978 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
5979 int cpu, buf_size_same;
5980 unsigned long size;
5981
5982 size = 0;
5983 buf_size_same = 1;
5984 /* check whether all per-CPU buffer sizes are the same */
5985 for_each_tracing_cpu(cpu) {
5986 /* fill in the size from first enabled cpu */
5987 if (size == 0)
12883efb
SRRH
5988 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5989 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
5990 buf_size_same = 0;
5991 break;
5992 }
5993 }
5994
5995 if (buf_size_same) {
5996 if (!ring_buffer_expanded)
5997 r = sprintf(buf, "%lu (expanded: %lu)\n",
5998 size >> 10,
5999 trace_buf_size >> 10);
6000 else
6001 r = sprintf(buf, "%lu\n", size >> 10);
6002 } else
6003 r = sprintf(buf, "X\n");
6004 } else
0bc392ee 6005 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 6006
db526ca3
SR
6007 mutex_unlock(&trace_types_lock);
6008
438ced17
VN
6009 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6010 return ret;
a98a3c3f
SR
6011}
6012
6013static ssize_t
6014tracing_entries_write(struct file *filp, const char __user *ubuf,
6015 size_t cnt, loff_t *ppos)
6016{
0bc392ee
ON
6017 struct inode *inode = file_inode(filp);
6018 struct trace_array *tr = inode->i_private;
a98a3c3f 6019 unsigned long val;
4f271a2a 6020 int ret;
a98a3c3f 6021
22fe9b54
PH
6022 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6023 if (ret)
c6caeeb1 6024 return ret;
a98a3c3f
SR
6025
6026 /* must have at least 1 entry */
6027 if (!val)
6028 return -EINVAL;
6029
1696b2b0
SR
6030 /* value is in KB */
6031 val <<= 10;
0bc392ee 6032 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
6033 if (ret < 0)
6034 return ret;
a98a3c3f 6035
cf8517cf 6036 *ppos += cnt;
a98a3c3f 6037
4f271a2a
VN
6038 return cnt;
6039}
bf5e6519 6040
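
These handlers back "buffer_size_kb": reading reports the per-CPU size (or "X" when CPUs disagree), writing resizes. The same fops also serve the per-CPU files, where tracing_get_cpu() selects a single CPU. A sketch of growing every CPU's buffer to 4 MB, tracefs path assumed:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/buffer_size_kb", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Interpreted in KB; 0 is rejected with -EINVAL. */
	fprintf(f, "4096\n");
	return fclose(f) ? 1 : 0;
}

Writing to per_cpu/cpu0/buffer_size_kb instead would resize only CPU 0, taking the single-CPU branch of __tracing_resize_ring_buffer().
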
f81ab074
VN
6041static ssize_t
6042tracing_total_entries_read(struct file *filp, char __user *ubuf,
6043 size_t cnt, loff_t *ppos)
6044{
6045 struct trace_array *tr = filp->private_data;
6046 char buf[64];
6047 int r, cpu;
6048 unsigned long size = 0, expanded_size = 0;
6049
6050 mutex_lock(&trace_types_lock);
6051 for_each_tracing_cpu(cpu) {
12883efb 6052 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
6053 if (!ring_buffer_expanded)
6054 expanded_size += trace_buf_size >> 10;
6055 }
6056 if (ring_buffer_expanded)
6057 r = sprintf(buf, "%lu\n", size);
6058 else
6059 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6060 mutex_unlock(&trace_types_lock);
6061
6062 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6063}
6064
4f271a2a
VN
6065static ssize_t
6066tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6067 size_t cnt, loff_t *ppos)
6068{
6069 /*
6070 * There is no need to read what the user has written; this function
6071 * exists only so that "echo" into the file does not return an error.
6072 */
6073
6074 *ppos += cnt;
a98a3c3f
SR
6075
6076 return cnt;
6077}
6078
4f271a2a
VN
6079static int
6080tracing_free_buffer_release(struct inode *inode, struct file *filp)
6081{
2b6080f2
SR
6082 struct trace_array *tr = inode->i_private;
6083
cf30cf67 6084 /* disable tracing? */
983f938a 6085 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 6086 tracer_tracing_off(tr);
4f271a2a 6087 /* resize the ring buffer to 0 */
2b6080f2 6088 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 6089
7b85af63
SRRH
6090 trace_array_put(tr);
6091
4f271a2a
VN
6092 return 0;
6093}
6094
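
Together these implement "free_buffer": the write is accepted and ignored, and the real work happens on release(), which shrinks the ring buffer to zero (after turning tracing off when the stop-on-free option is set). A sketch, path assumed:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/free_buffer", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1\n", 2) < 0)	/* content is ignored */
		perror("write");
	close(fd);			/* release() frees the buffer */
	return 0;
}
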
5bf9a1ee
PP
6095static ssize_t
6096tracing_mark_write(struct file *filp, const char __user *ubuf,
6097 size_t cnt, loff_t *fpos)
6098{
2d71619c 6099 struct trace_array *tr = filp->private_data;
d696b58c 6100 struct ring_buffer_event *event;
3dd80953 6101 enum event_trigger_type tt = ETT_NONE;
d696b58c
SR
6102 struct ring_buffer *buffer;
6103 struct print_entry *entry;
6104 unsigned long irq_flags;
656c7f0d 6105 const char faulted[] = "<faulted>";
d696b58c 6106 ssize_t written;
d696b58c
SR
6107 int size;
6108 int len;
fa32e855 6109
656c7f0d
SRRH
6110/* Used in tracing_mark_raw_write() as well */
6111#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
5bf9a1ee 6112
c76f0694 6113 if (tracing_disabled)
5bf9a1ee
PP
6114 return -EINVAL;
6115
983f938a 6116 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5224c3a3
MSB
6117 return -EINVAL;
6118
5bf9a1ee
PP
6119 if (cnt > TRACE_BUF_SIZE)
6120 cnt = TRACE_BUF_SIZE;
6121
d696b58c 6122 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 6123
d696b58c 6124 local_save_flags(irq_flags);
656c7f0d 6125 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
d696b58c 6126
656c7f0d
SRRH
6127 /* If less than "<faulted>", then make sure we can still add that */
6128 if (cnt < FAULTED_SIZE)
6129 size += FAULTED_SIZE - cnt;
d696b58c 6130
2d71619c 6131 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
6132 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6133 irq_flags, preempt_count());
656c7f0d 6134 if (unlikely(!event))
d696b58c 6135 /* Ring buffer disabled, return as if not open for write */
656c7f0d 6136 return -EBADF;
d696b58c
SR
6137
6138 entry = ring_buffer_event_data(event);
6139 entry->ip = _THIS_IP_;
6140
656c7f0d
SRRH
6141 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6142 if (len) {
6143 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6144 cnt = FAULTED_SIZE;
6145 written = -EFAULT;
c13d2f7c 6146 } else
656c7f0d
SRRH
6147 written = cnt;
6148 len = cnt;
5bf9a1ee 6149
3dd80953
SRV
6150 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6151 /* do not add \n before testing triggers, but add \0 */
6152 entry->buf[cnt] = '\0';
6153 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6154 }
6155
d696b58c
SR
6156 if (entry->buf[cnt - 1] != '\n') {
6157 entry->buf[cnt] = '\n';
6158 entry->buf[cnt + 1] = '\0';
6159 } else
6160 entry->buf[cnt] = '\0';
6161
7ffbd48d 6162 __buffer_unlock_commit(buffer, event);
5bf9a1ee 6163
3dd80953
SRV
6164 if (tt)
6165 event_triggers_post_call(tr->trace_marker_file, tt);
6166
656c7f0d
SRRH
6167 if (written > 0)
6168 *fpos += written;
5bf9a1ee 6169
fa32e855
SR
6170 return written;
6171}
6172
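
This is the kernel side of the classic "trace_marker" interface: each write() becomes one TRACE_PRINT event, trigger-checked and newline-terminated as above. A user-space sketch for correlating application activity with a trace, path assumed:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
	const char *msg = "hello from user space\n";

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");	/* -EBADF if the ring buffer is off */
	close(fd);
	return 0;
}

Long-running programs typically open the fd once at startup and write at interesting points; each write is bounded by TRACE_BUF_SIZE.
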
6173/* Limit it for now to 3K (including tag) */
6174#define RAW_DATA_MAX_SIZE (1024*3)
6175
6176static ssize_t
6177tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6178 size_t cnt, loff_t *fpos)
6179{
6180 struct trace_array *tr = filp->private_data;
6181 struct ring_buffer_event *event;
6182 struct ring_buffer *buffer;
6183 struct raw_data_entry *entry;
656c7f0d 6184 const char faulted[] = "<faulted>";
fa32e855 6185 unsigned long irq_flags;
fa32e855 6186 ssize_t written;
fa32e855
SR
6187 int size;
6188 int len;
6189
656c7f0d
SRRH
6190#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6191
fa32e855
SR
6192 if (tracing_disabled)
6193 return -EINVAL;
6194
6195 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6196 return -EINVAL;
6197
6198 /* The marker must at least have a tag id */
6199 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6200 return -EINVAL;
6201
6202 if (cnt > TRACE_BUF_SIZE)
6203 cnt = TRACE_BUF_SIZE;
6204
6205 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6206
fa32e855
SR
6207 local_save_flags(irq_flags);
6208 size = sizeof(*entry) + cnt;
656c7f0d
SRRH
6209 if (cnt < FAULT_SIZE_ID)
6210 size += FAULT_SIZE_ID - cnt;
6211
fa32e855 6212 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
6213 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6214 irq_flags, preempt_count());
656c7f0d 6215 if (!event)
fa32e855 6216 /* Ring buffer disabled, return as if not open for write */
656c7f0d 6217 return -EBADF;
fa32e855
SR
6218
6219 entry = ring_buffer_event_data(event);
6220
656c7f0d
SRRH
6221 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6222 if (len) {
6223 entry->id = -1;
6224 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6225 written = -EFAULT;
fa32e855 6226 } else
656c7f0d 6227 written = cnt;
fa32e855
SR
6228
6229 __buffer_unlock_commit(buffer, event);
6230
656c7f0d
SRRH
6231 if (written > 0)
6232 *fpos += written;
1aa54bca
MS
6233
6234 return written;
5bf9a1ee
PP
6235}
6236
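
The raw variant expects binary payloads that begin with a 4-byte tag (consumed as entry->id above) so a post-processing tool can demultiplex records. A companion sketch; the struct layout is purely illustrative:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
	struct {
		uint32_t id;		/* becomes entry->id */
		uint64_t payload;	/* opaque data for the decoder */
	} __attribute__((packed)) rec = { .id = 42, .payload = 0xdeadbeef };

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Writes shorter than sizeof(int) are rejected with -EINVAL. */
	if (write(fd, &rec, sizeof(rec)) < 0)
		perror("write");
	close(fd);
	return 0;
}
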
13f16d20 6237static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 6238{
2b6080f2 6239 struct trace_array *tr = m->private;
5079f326
Z
6240 int i;
6241
6242 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 6243 seq_printf(m,
5079f326 6244 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
6245 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6246 i == tr->clock_id ? "]" : "");
13f16d20 6247 seq_putc(m, '\n');
5079f326 6248
13f16d20 6249 return 0;
5079f326
Z
6250}
6251
d71bd34d 6252int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 6253{
5079f326
Z
6254 int i;
6255
5079f326
Z
6256 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6257 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6258 break;
6259 }
6260 if (i == ARRAY_SIZE(trace_clocks))
6261 return -EINVAL;
6262
5079f326
Z
6263 mutex_lock(&trace_types_lock);
6264
2b6080f2
SR
6265 tr->clock_id = i;
6266
12883efb 6267 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 6268
60303ed3
DS
6269 /*
6270 * New clock may not be consistent with the previous clock.
6271 * Reset the buffer so that it doesn't have incomparable timestamps.
6272 */
9457158b 6273 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
6274
6275#ifdef CONFIG_TRACER_MAX_TRACE
170b3b10 6276 if (tr->max_buffer.buffer)
12883efb 6277 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 6278 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 6279#endif
60303ed3 6280
5079f326
Z
6281 mutex_unlock(&trace_types_lock);
6282
e1e232ca
SR
6283 return 0;
6284}
6285
6286static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6287 size_t cnt, loff_t *fpos)
6288{
6289 struct seq_file *m = filp->private_data;
6290 struct trace_array *tr = m->private;
6291 char buf[64];
6292 const char *clockstr;
6293 int ret;
6294
6295 if (cnt >= sizeof(buf))
6296 return -EINVAL;
6297
4afe6495 6298 if (copy_from_user(buf, ubuf, cnt))
e1e232ca
SR
6299 return -EFAULT;
6300
6301 buf[cnt] = 0;
6302
6303 clockstr = strstrip(buf);
6304
6305 ret = tracing_set_clock(tr, clockstr);
6306 if (ret)
6307 return ret;
6308
5079f326
Z
6309 *fpos += cnt;
6310
6311 return cnt;
6312}
6313
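
tracing_set_clock() and tracing_clock_write() drive the "trace_clock" file. Selecting a clock resets the buffers (see the comment above about incomparable timestamps), so switch clocks before capturing. A sketch, path assumed; "mono" is one of the entries in trace_clocks[]:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/tracing/trace_clock";
	char line[256];
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("mono\n", f);	/* -EINVAL for an unknown clock name */
	fclose(f);

	f = fopen(path, "r");
	if (f && fgets(line, sizeof(line), f))
		printf("%s", line);	/* active clock shown in brackets */
	if (f)
		fclose(f);
	return 0;
}
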
13f16d20
LZ
6314static int tracing_clock_open(struct inode *inode, struct file *file)
6315{
7b85af63
SRRH
6316 struct trace_array *tr = inode->i_private;
6317 int ret;
6318
13f16d20
LZ
6319 if (tracing_disabled)
6320 return -ENODEV;
2b6080f2 6321
7b85af63
SRRH
6322 if (trace_array_get(tr))
6323 return -ENODEV;
6324
6325 ret = single_open(file, tracing_clock_show, inode->i_private);
6326 if (ret < 0)
6327 trace_array_put(tr);
6328
6329 return ret;
13f16d20
LZ
6330}
6331
2c1ea60b
TZ
6332static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6333{
6334 struct trace_array *tr = m->private;
6335
6336 mutex_lock(&trace_types_lock);
6337
6338 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6339 seq_puts(m, "delta [absolute]\n");
6340 else
6341 seq_puts(m, "[delta] absolute\n");
6342
6343 mutex_unlock(&trace_types_lock);
6344
6345 return 0;
6346}
6347
6348static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6349{
6350 struct trace_array *tr = inode->i_private;
6351 int ret;
6352
6353 if (tracing_disabled)
6354 return -ENODEV;
6355
6356 if (trace_array_get(tr))
6357 return -ENODEV;
6358
6359 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6360 if (ret < 0)
6361 trace_array_put(tr);
6362
6363 return ret;
6364}
6365
00b41452
TZ
6366int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6367{
6368 int ret = 0;
6369
6370 mutex_lock(&trace_types_lock);
6371
6372 if (abs && tr->time_stamp_abs_ref++)
6373 goto out;
6374
6375 if (!abs) {
6376 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6377 ret = -EINVAL;
6378 goto out;
6379 }
6380
6381 if (--tr->time_stamp_abs_ref)
6382 goto out;
6383 }
6384
6385 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6386
6387#ifdef CONFIG_TRACER_MAX_TRACE
6388 if (tr->max_buffer.buffer)
6389 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6390#endif
6391 out:
6392 mutex_unlock(&trace_types_lock);
6393
6394 return ret;
6395}
6396
6de58e62
SRRH
6397struct ftrace_buffer_info {
6398 struct trace_iterator iter;
6399 void *spare;
73a757e6 6400 unsigned int spare_cpu;
6de58e62
SRRH
6401 unsigned int read;
6402};
6403
debdd57f
HT
6404#ifdef CONFIG_TRACER_SNAPSHOT
6405static int tracing_snapshot_open(struct inode *inode, struct file *file)
6406{
6484c71c 6407 struct trace_array *tr = inode->i_private;
debdd57f 6408 struct trace_iterator *iter;
2b6080f2 6409 struct seq_file *m;
debdd57f
HT
6410 int ret = 0;
6411
ff451961
SRRH
6412 if (trace_array_get(tr) < 0)
6413 return -ENODEV;
6414
debdd57f 6415 if (file->f_mode & FMODE_READ) {
6484c71c 6416 iter = __tracing_open(inode, file, true);
debdd57f
HT
6417 if (IS_ERR(iter))
6418 ret = PTR_ERR(iter);
2b6080f2
SR
6419 } else {
6420 /* Writes still need the seq_file to hold the private data */
f77d09a3 6421 ret = -ENOMEM;
2b6080f2
SR
6422 m = kzalloc(sizeof(*m), GFP_KERNEL);
6423 if (!m)
f77d09a3 6424 goto out;
2b6080f2
SR
6425 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6426 if (!iter) {
6427 kfree(m);
f77d09a3 6428 goto out;
2b6080f2 6429 }
f77d09a3
AL
6430 ret = 0;
6431
ff451961 6432 iter->tr = tr;
6484c71c
ON
6433 iter->trace_buffer = &tr->max_buffer;
6434 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
6435 m->private = iter;
6436 file->private_data = m;
debdd57f 6437 }
f77d09a3 6438out:
ff451961
SRRH
6439 if (ret < 0)
6440 trace_array_put(tr);
6441
debdd57f
HT
6442 return ret;
6443}
6444
6445static ssize_t
6446tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6447 loff_t *ppos)
6448{
2b6080f2
SR
6449 struct seq_file *m = filp->private_data;
6450 struct trace_iterator *iter = m->private;
6451 struct trace_array *tr = iter->tr;
debdd57f
HT
6452 unsigned long val;
6453 int ret;
6454
6455 ret = tracing_update_buffers();
6456 if (ret < 0)
6457 return ret;
6458
6459 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6460 if (ret)
6461 return ret;
6462
6463 mutex_lock(&trace_types_lock);
6464
2b6080f2 6465 if (tr->current_trace->use_max_tr) {
debdd57f
HT
6466 ret = -EBUSY;
6467 goto out;
6468 }
6469
6470 switch (val) {
6471 case 0:
f1affcaa
SRRH
6472 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6473 ret = -EINVAL;
6474 break;
debdd57f 6475 }
3209cff4
SRRH
6476 if (tr->allocated_snapshot)
6477 free_snapshot(tr);
debdd57f
HT
6478 break;
6479 case 1:
f1affcaa
SRRH
6480/* Only allow per-cpu swap if the ring buffer supports it */
6481#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6482 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6483 ret = -EINVAL;
6484 break;
6485 }
6486#endif
45ad21ca 6487 if (!tr->allocated_snapshot) {
2824f503 6488 ret = tracing_alloc_snapshot_instance(tr);
debdd57f
HT
6489 if (ret < 0)
6490 break;
debdd57f 6491 }
debdd57f
HT
6492 local_irq_disable();
6493 /* Now, we're going to swap */
f1affcaa 6494 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 6495 update_max_tr(tr, current, smp_processor_id());
f1affcaa 6496 else
ce9bae55 6497 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
6498 local_irq_enable();
6499 break;
6500 default:
45ad21ca 6501 if (tr->allocated_snapshot) {
f1affcaa
SRRH
6502 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6503 tracing_reset_online_cpus(&tr->max_buffer);
6504 else
6505 tracing_reset(&tr->max_buffer, iter->cpu_file);
6506 }
debdd57f
HT
6507 break;
6508 }
6509
6510 if (ret >= 0) {
6511 *ppos += cnt;
6512 ret = cnt;
6513 }
6514out:
6515 mutex_unlock(&trace_types_lock);
6516 return ret;
6517}
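
The switch above gives the "snapshot" file its documented semantics: 0 frees the spare buffer, 1 allocates it (if needed) and swaps it in, and any larger value clears it without freeing. A user-space sketch, assuming CONFIG_TRACER_SNAPSHOT and the usual tracefs path:

#include <stdio.h>

static int snapshot_ctl(const char *val)
{
	FILE *f = fopen("/sys/kernel/tracing/snapshot", "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	if (snapshot_ctl("1\n"))	/* capture: swap in the max buffer */
		perror("snapshot");
	/* ... read the snapshot file like "trace" to inspect it ... */
	if (snapshot_ctl("0\n"))	/* free the spare buffer again */
		perror("snapshot");
	return 0;
}

Any write fails with -EBUSY while a tracer that itself uses the max buffer (a latency tracer) is active, per the check at the top.
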
2b6080f2
SR
6518
6519static int tracing_snapshot_release(struct inode *inode, struct file *file)
6520{
6521 struct seq_file *m = file->private_data;
ff451961
SRRH
6522 int ret;
6523
6524 ret = tracing_release(inode, file);
2b6080f2
SR
6525
6526 if (file->f_mode & FMODE_READ)
ff451961 6527 return ret;
2b6080f2
SR
6528
6529 /* If write only, the seq_file is just a stub */
6530 if (m)
6531 kfree(m->private);
6532 kfree(m);
6533
6534 return 0;
6535}
6536
6de58e62
SRRH
6537static int tracing_buffers_open(struct inode *inode, struct file *filp);
6538static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6539 size_t count, loff_t *ppos);
6540static int tracing_buffers_release(struct inode *inode, struct file *file);
6541static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6542 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6543
6544static int snapshot_raw_open(struct inode *inode, struct file *filp)
6545{
6546 struct ftrace_buffer_info *info;
6547 int ret;
6548
6549 ret = tracing_buffers_open(inode, filp);
6550 if (ret < 0)
6551 return ret;
6552
6553 info = filp->private_data;
6554
6555 if (info->iter.trace->use_max_tr) {
6556 tracing_buffers_release(inode, filp);
6557 return -EBUSY;
6558 }
6559
6560 info->iter.snapshot = true;
6561 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6562
6563 return ret;
6564}
6565
debdd57f
HT
6566#endif /* CONFIG_TRACER_SNAPSHOT */
6567
6568
6508fa76
SF
6569static const struct file_operations tracing_thresh_fops = {
6570 .open = tracing_open_generic,
6571 .read = tracing_thresh_read,
6572 .write = tracing_thresh_write,
6573 .llseek = generic_file_llseek,
6574};
6575
f971cc9a 6576#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5e2336a0 6577static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
6578 .open = tracing_open_generic,
6579 .read = tracing_max_lat_read,
6580 .write = tracing_max_lat_write,
b444786f 6581 .llseek = generic_file_llseek,
bc0c38d1 6582};
e428abbb 6583#endif
bc0c38d1 6584
5e2336a0 6585static const struct file_operations set_tracer_fops = {
4bf39a94
IM
6586 .open = tracing_open_generic,
6587 .read = tracing_set_trace_read,
6588 .write = tracing_set_trace_write,
b444786f 6589 .llseek = generic_file_llseek,
bc0c38d1
SR
6590};
6591
5e2336a0 6592static const struct file_operations tracing_pipe_fops = {
4bf39a94 6593 .open = tracing_open_pipe,
2a2cc8f7 6594 .poll = tracing_poll_pipe,
4bf39a94 6595 .read = tracing_read_pipe,
3c56819b 6596 .splice_read = tracing_splice_read_pipe,
4bf39a94 6597 .release = tracing_release_pipe,
b444786f 6598 .llseek = no_llseek,
b3806b43
SR
6599};
6600
5e2336a0 6601static const struct file_operations tracing_entries_fops = {
0bc392ee 6602 .open = tracing_open_generic_tr,
a98a3c3f
SR
6603 .read = tracing_entries_read,
6604 .write = tracing_entries_write,
b444786f 6605 .llseek = generic_file_llseek,
0bc392ee 6606 .release = tracing_release_generic_tr,
a98a3c3f
SR
6607};
6608
f81ab074 6609static const struct file_operations tracing_total_entries_fops = {
7b85af63 6610 .open = tracing_open_generic_tr,
f81ab074
VN
6611 .read = tracing_total_entries_read,
6612 .llseek = generic_file_llseek,
7b85af63 6613 .release = tracing_release_generic_tr,
f81ab074
VN
6614};
6615
4f271a2a 6616static const struct file_operations tracing_free_buffer_fops = {
7b85af63 6617 .open = tracing_open_generic_tr,
4f271a2a
VN
6618 .write = tracing_free_buffer_write,
6619 .release = tracing_free_buffer_release,
6620};
6621
5e2336a0 6622static const struct file_operations tracing_mark_fops = {
7b85af63 6623 .open = tracing_open_generic_tr,
5bf9a1ee 6624 .write = tracing_mark_write,
b444786f 6625 .llseek = generic_file_llseek,
7b85af63 6626 .release = tracing_release_generic_tr,
5bf9a1ee
PP
6627};
6628
fa32e855
SR
6629static const struct file_operations tracing_mark_raw_fops = {
6630 .open = tracing_open_generic_tr,
6631 .write = tracing_mark_raw_write,
6632 .llseek = generic_file_llseek,
6633 .release = tracing_release_generic_tr,
6634};
6635
5079f326 6636static const struct file_operations trace_clock_fops = {
13f16d20
LZ
6637 .open = tracing_clock_open,
6638 .read = seq_read,
6639 .llseek = seq_lseek,
7b85af63 6640 .release = tracing_single_release_tr,
5079f326
Z
6641 .write = tracing_clock_write,
6642};
6643
2c1ea60b
TZ
6644static const struct file_operations trace_time_stamp_mode_fops = {
6645 .open = tracing_time_stamp_mode_open,
6646 .read = seq_read,
6647 .llseek = seq_lseek,
6648 .release = tracing_single_release_tr,
6649};
6650
debdd57f
HT
6651#ifdef CONFIG_TRACER_SNAPSHOT
6652static const struct file_operations snapshot_fops = {
6653 .open = tracing_snapshot_open,
6654 .read = seq_read,
6655 .write = tracing_snapshot_write,
098c879e 6656 .llseek = tracing_lseek,
2b6080f2 6657 .release = tracing_snapshot_release,
debdd57f 6658};
debdd57f 6659
6de58e62
SRRH
6660static const struct file_operations snapshot_raw_fops = {
6661 .open = snapshot_raw_open,
6662 .read = tracing_buffers_read,
6663 .release = tracing_buffers_release,
6664 .splice_read = tracing_buffers_splice_read,
6665 .llseek = no_llseek,
2cadf913
SR
6666};
6667
6de58e62
SRRH
6668#endif /* CONFIG_TRACER_SNAPSHOT */
6669
2cadf913
SR
6670static int tracing_buffers_open(struct inode *inode, struct file *filp)
6671{
46ef2be0 6672 struct trace_array *tr = inode->i_private;
2cadf913 6673 struct ftrace_buffer_info *info;
7b85af63 6674 int ret;
2cadf913
SR
6675
6676 if (tracing_disabled)
6677 return -ENODEV;
6678
7b85af63
SRRH
6679 if (trace_array_get(tr) < 0)
6680 return -ENODEV;
6681
2cadf913 6682 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
6683 if (!info) {
6684 trace_array_put(tr);
2cadf913 6685 return -ENOMEM;
7b85af63 6686 }
2cadf913 6687
a695cb58
SRRH
6688 mutex_lock(&trace_types_lock);
6689
cc60cdc9 6690 info->iter.tr = tr;
46ef2be0 6691 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 6692 info->iter.trace = tr->current_trace;
12883efb 6693 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 6694 info->spare = NULL;
2cadf913 6695 /* Force reading ring buffer for first read */
cc60cdc9 6696 info->read = (unsigned int)-1;
2cadf913
SR
6697
6698 filp->private_data = info;
6699
cf6ab6d9
SRRH
6700 tr->current_trace->ref++;
6701
a695cb58
SRRH
6702 mutex_unlock(&trace_types_lock);
6703
7b85af63
SRRH
6704 ret = nonseekable_open(inode, filp);
6705 if (ret < 0)
6706 trace_array_put(tr);
6707
6708 return ret;
2cadf913
SR
6709}
6710
9dd95748 6711static __poll_t
cc60cdc9
SR
6712tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6713{
6714 struct ftrace_buffer_info *info = filp->private_data;
6715 struct trace_iterator *iter = &info->iter;
6716
6717 return trace_poll(iter, filp, poll_table);
6718}
6719
2cadf913
SR
6720static ssize_t
6721tracing_buffers_read(struct file *filp, char __user *ubuf,
6722 size_t count, loff_t *ppos)
6723{
6724 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 6725 struct trace_iterator *iter = &info->iter;
a7e52ad7 6726 ssize_t ret = 0;
6de58e62 6727 ssize_t size;
2cadf913 6728
2dc5d12b
SR
6729 if (!count)
6730 return 0;
6731
6de58e62 6732#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
6733 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6734 return -EBUSY;
6de58e62
SRRH
6735#endif
6736
73a757e6 6737 if (!info->spare) {
12883efb
SRRH
6738 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6739 iter->cpu_file);
a7e52ad7
SRV
6740 if (IS_ERR(info->spare)) {
6741 ret = PTR_ERR(info->spare);
6742 info->spare = NULL;
6743 } else {
6744 info->spare_cpu = iter->cpu_file;
6745 }
73a757e6 6746 }
ddd538f3 6747 if (!info->spare)
a7e52ad7 6748 return ret;
ddd538f3 6749
2cadf913
SR
6750 /* Do we have previous read data to read? */
6751 if (info->read < PAGE_SIZE)
6752 goto read;
6753
b627344f 6754 again:
cc60cdc9 6755 trace_access_lock(iter->cpu_file);
12883efb 6756 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
6757 &info->spare,
6758 count,
cc60cdc9
SR
6759 iter->cpu_file, 0);
6760 trace_access_unlock(iter->cpu_file);
2cadf913 6761
b627344f
SR
6762 if (ret < 0) {
6763 if (trace_empty(iter)) {
d716ff71
SRRH
6764 if ((filp->f_flags & O_NONBLOCK))
6765 return -EAGAIN;
6766
2c2b0a78 6767 ret = wait_on_pipe(iter, 0);
d716ff71
SRRH
6768 if (ret)
6769 return ret;
6770
b627344f
SR
6771 goto again;
6772 }
d716ff71 6773 return 0;
b627344f 6774 }
436fc280 6775
436fc280 6776 info->read = 0;
b627344f 6777 read:
2cadf913
SR
6778 size = PAGE_SIZE - info->read;
6779 if (size > count)
6780 size = count;
6781
6782 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
6783 if (ret == size)
6784 return -EFAULT;
6785
2dc5d12b
SR
6786 size -= ret;
6787
2cadf913
SR
6788 *ppos += size;
6789 info->read += size;
6790
6791 return size;
6792}
6793
6794static int tracing_buffers_release(struct inode *inode, struct file *file)
6795{
6796 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 6797 struct trace_iterator *iter = &info->iter;
2cadf913 6798
a695cb58
SRRH
6799 mutex_lock(&trace_types_lock);
6800
cf6ab6d9
SRRH
6801 iter->tr->current_trace->ref--;
6802
ff451961 6803 __trace_array_put(iter->tr);
2cadf913 6804
ddd538f3 6805 if (info->spare)
73a757e6
SRV
6806 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6807 info->spare_cpu, info->spare);
2cadf913
SR
6808 kfree(info);
6809
a695cb58
SRRH
6810 mutex_unlock(&trace_types_lock);
6811
2cadf913
SR
6812 return 0;
6813}
6814
6815struct buffer_ref {
6816 struct ring_buffer *buffer;
6817 void *page;
73a757e6 6818 int cpu;
2cadf913
SR
6819 int ref;
6820};
6821
6822static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6823 struct pipe_buffer *buf)
6824{
6825 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6826
6827 if (--ref->ref)
6828 return;
6829
73a757e6 6830 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2cadf913
SR
6831 kfree(ref);
6832 buf->private = 0;
6833}
6834
2cadf913
SR
6835static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6836 struct pipe_buffer *buf)
6837{
6838 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6839
6840 ref->ref++;
6841}
6842
6843/* Pipe buffer operations for a buffer. */
28dfef8f 6844static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 6845 .can_merge = 0,
2cadf913
SR
6846 .confirm = generic_pipe_buf_confirm,
6847 .release = buffer_pipe_buf_release,
d55cb6cf 6848 .steal = generic_pipe_buf_steal,
2cadf913
SR
6849 .get = buffer_pipe_buf_get,
6850};
6851
6852/*
6853 * Callback from splice_to_pipe(), if we need to release some pages
6854 * at the end of the spd in case we errored out while filling the pipe.
6855 */
6856static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6857{
6858 struct buffer_ref *ref =
6859 (struct buffer_ref *)spd->partial[i].private;
6860
6861 if (--ref->ref)
6862 return;
6863
73a757e6 6864 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2cadf913
SR
6865 kfree(ref);
6866 spd->partial[i].private = 0;
6867}
6868
6869static ssize_t
6870tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6871 struct pipe_inode_info *pipe, size_t len,
6872 unsigned int flags)
6873{
6874 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 6875 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
6876 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6877 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 6878 struct splice_pipe_desc spd = {
35f3d14d
JA
6879 .pages = pages_def,
6880 .partial = partial_def,
047fe360 6881 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
6882 .ops = &buffer_pipe_buf_ops,
6883 .spd_release = buffer_spd_release,
6884 };
6885 struct buffer_ref *ref;
6b7e633f 6886 int entries, i;
07906da7 6887 ssize_t ret = 0;
2cadf913 6888
6de58e62 6889#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
6890 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6891 return -EBUSY;
6de58e62
SRRH
6892#endif
6893
d716ff71
SRRH
6894 if (*ppos & (PAGE_SIZE - 1))
6895 return -EINVAL;
93cfb3c9
LJ
6896
6897 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
6898 if (len < PAGE_SIZE)
6899 return -EINVAL;
93cfb3c9
LJ
6900 len &= PAGE_MASK;
6901 }
6902
1ae2293d
AV
6903 if (splice_grow_spd(pipe, &spd))
6904 return -ENOMEM;
6905
cc60cdc9
SR
6906 again:
6907 trace_access_lock(iter->cpu_file);
12883efb 6908 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 6909
a786c06d 6910 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
6911 struct page *page;
6912 int r;
6913
6914 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
6915 if (!ref) {
6916 ret = -ENOMEM;
2cadf913 6917 break;
07906da7 6918 }
2cadf913 6919
7267fa68 6920 ref->ref = 1;
12883efb 6921 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 6922 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
a7e52ad7
SRV
6923 if (IS_ERR(ref->page)) {
6924 ret = PTR_ERR(ref->page);
6925 ref->page = NULL;
2cadf913
SR
6926 kfree(ref);
6927 break;
6928 }
73a757e6 6929 ref->cpu = iter->cpu_file;
2cadf913
SR
6930
6931 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 6932 len, iter->cpu_file, 1);
2cadf913 6933 if (r < 0) {
73a757e6
SRV
6934 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6935 ref->page);
2cadf913
SR
6936 kfree(ref);
6937 break;
6938 }
6939
2cadf913
SR
6940 page = virt_to_page(ref->page);
6941
6942 spd.pages[i] = page;
6943 spd.partial[i].len = PAGE_SIZE;
6944 spd.partial[i].offset = 0;
6945 spd.partial[i].private = (unsigned long)ref;
6946 spd.nr_pages++;
93cfb3c9 6947 *ppos += PAGE_SIZE;
93459c6c 6948
12883efb 6949 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
6950 }
6951
cc60cdc9 6952 trace_access_unlock(iter->cpu_file);
2cadf913
SR
6953 spd.nr_pages = i;
6954
6955 /* did we read anything? */
6956 if (!spd.nr_pages) {
07906da7 6957 if (ret)
1ae2293d 6958 goto out;
d716ff71 6959
1ae2293d 6960 ret = -EAGAIN;
d716ff71 6961 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
1ae2293d 6962 goto out;
07906da7 6963
03329f99 6964 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8b8b3683 6965 if (ret)
1ae2293d 6966 goto out;
e30f53aa 6967
cc60cdc9 6968 goto again;
2cadf913
SR
6969 }
6970
6971 ret = splice_to_pipe(pipe, &spd);
1ae2293d 6972out:
047fe360 6973 splice_shrink_spd(&spd);
6de58e62 6974
2cadf913
SR
6975 return ret;
6976}
6977
6978static const struct file_operations tracing_buffers_fops = {
6979 .open = tracing_buffers_open,
6980 .read = tracing_buffers_read,
cc60cdc9 6981 .poll = tracing_buffers_poll,
2cadf913
SR
6982 .release = tracing_buffers_release,
6983 .splice_read = tracing_buffers_splice_read,
6984 .llseek = no_llseek,
6985};
6986
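
These fops appear as per_cpu/cpu<N>/trace_pipe_raw and hand whole ring buffer sub-buffers to user space, which is how tools like trace-cmd record efficiently. A read() sketch; the 4096-byte page size is an assumption (use sysconf(_SC_PAGESIZE) in real code):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY | O_NONBLOCK);
	char page[4096];
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Each successful read returns binary page data, not text. */
	while ((n = read(fd, page, sizeof(page))) > 0)
		fprintf(stderr, "got %zd raw bytes\n", n);
	/* EAGAIN here just means the ring buffer is currently empty. */
	close(fd);
	return 0;
}
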
c8d77183
SR
6987static ssize_t
6988tracing_stats_read(struct file *filp, char __user *ubuf,
6989 size_t count, loff_t *ppos)
6990{
4d3435b8
ON
6991 struct inode *inode = file_inode(filp);
6992 struct trace_array *tr = inode->i_private;
12883efb 6993 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 6994 int cpu = tracing_get_cpu(inode);
c8d77183
SR
6995 struct trace_seq *s;
6996 unsigned long cnt;
c64e148a
VN
6997 unsigned long long t;
6998 unsigned long usec_rem;
c8d77183 6999
e4f2d10f 7000 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 7001 if (!s)
a646365c 7002 return -ENOMEM;
c8d77183
SR
7003
7004 trace_seq_init(s);
7005
12883efb 7006 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
7007 trace_seq_printf(s, "entries: %ld\n", cnt);
7008
12883efb 7009 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
7010 trace_seq_printf(s, "overrun: %ld\n", cnt);
7011
12883efb 7012 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
7013 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7014
12883efb 7015 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
7016 trace_seq_printf(s, "bytes: %ld\n", cnt);
7017
58e8eedf 7018 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 7019 /* local or global for trace_clock */
12883efb 7020 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
7021 usec_rem = do_div(t, USEC_PER_SEC);
7022 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7023 t, usec_rem);
7024
12883efb 7025 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
7026 usec_rem = do_div(t, USEC_PER_SEC);
7027 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7028 } else {
7029 /* counter or tsc mode for trace_clock */
7030 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 7031 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 7032
11043d8b 7033 trace_seq_printf(s, "now ts: %llu\n",
12883efb 7034 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 7035 }
c64e148a 7036
12883efb 7037 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
7038 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7039
12883efb 7040 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
7041 trace_seq_printf(s, "read events: %ld\n", cnt);
7042
5ac48378
SRRH
7043 count = simple_read_from_buffer(ubuf, count, ppos,
7044 s->buffer, trace_seq_used(s));
c8d77183
SR
7045
7046 kfree(s);
7047
7048 return count;
7049}
7050
7051static const struct file_operations tracing_stats_fops = {
4d3435b8 7052 .open = tracing_open_generic_tr,
c8d77183 7053 .read = tracing_stats_read,
b444786f 7054 .llseek = generic_file_llseek,
4d3435b8 7055 .release = tracing_release_generic_tr,
c8d77183
SR
7056};
7057
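
tracing_stats_read() formats the counters exposed as per_cpu/cpu<N>/stats. Dumping them is a one-liner; a sketch for CPU 0, tracefs path assumed:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/per_cpu/cpu0/stats", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* entries, overrun, commit overrun, bytes, timestamps, ... */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
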
bc0c38d1
SR
7058#ifdef CONFIG_DYNAMIC_FTRACE
7059
7060static ssize_t
b807c3d0 7061tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
7062 size_t cnt, loff_t *ppos)
7063{
7064 unsigned long *p = filp->private_data;
6a9c981b 7065 char buf[64]; /* Not too big for a shallow stack */
bc0c38d1
SR
7066 int r;
7067
6a9c981b 7068 r = scnprintf(buf, 63, "%ld", *p);
b807c3d0
SR
7069 buf[r++] = '\n';
7070
6a9c981b 7071 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
7072}
7073
5e2336a0 7074static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 7075 .open = tracing_open_generic,
b807c3d0 7076 .read = tracing_read_dyn_info,
b444786f 7077 .llseek = generic_file_llseek,
bc0c38d1 7078};
77fd5c15 7079#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 7080
77fd5c15
SRRH
7081#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7082static void
bca6c8d0 7083ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
b5f081b5 7084 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 7085 void *data)
77fd5c15 7086{
cab50379 7087 tracing_snapshot_instance(tr);
77fd5c15 7088}
bc0c38d1 7089
77fd5c15 7090static void
bca6c8d0 7091ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
b5f081b5 7092 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 7093 void *data)
bc0c38d1 7094{
6e444319 7095 struct ftrace_func_mapper *mapper = data;
1a93f8bd 7096 long *count = NULL;
77fd5c15 7097
1a93f8bd
SRV
7098 if (mapper)
7099 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7100
7101 if (count) {
7102
7103 if (*count <= 0)
7104 return;
bc0c38d1 7105
77fd5c15 7106 (*count)--;
1a93f8bd 7107 }
77fd5c15 7108
cab50379 7109 tracing_snapshot_instance(tr);
77fd5c15
SRRH
7110}
7111
7112static int
7113ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7114 struct ftrace_probe_ops *ops, void *data)
7115{
6e444319 7116 struct ftrace_func_mapper *mapper = data;
1a93f8bd 7117 long *count = NULL;
77fd5c15
SRRH
7118
7119 seq_printf(m, "%ps:", (void *)ip);
7120
fa6f0cc7 7121 seq_puts(m, "snapshot");
77fd5c15 7122
1a93f8bd
SRV
7123 if (mapper)
7124 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7125
7126 if (count)
7127 seq_printf(m, ":count=%ld\n", *count);
77fd5c15 7128 else
1a93f8bd 7129 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
7130
7131 return 0;
7132}
7133
1a93f8bd 7134static int
b5f081b5 7135ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 7136 unsigned long ip, void *init_data, void **data)
1a93f8bd 7137{
6e444319
SRV
7138 struct ftrace_func_mapper *mapper = *data;
7139
7140 if (!mapper) {
7141 mapper = allocate_ftrace_func_mapper();
7142 if (!mapper)
7143 return -ENOMEM;
7144 *data = mapper;
7145 }
1a93f8bd 7146
6e444319 7147 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
1a93f8bd
SRV
7148}
7149
7150static void
b5f081b5 7151ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 7152 unsigned long ip, void *data)
1a93f8bd 7153{
6e444319
SRV
7154 struct ftrace_func_mapper *mapper = data;
7155
7156 if (!ip) {
7157 if (!mapper)
7158 return;
7159 free_ftrace_func_mapper(mapper, NULL);
7160 return;
7161 }
1a93f8bd
SRV
7162
7163 ftrace_func_mapper_remove_ip(mapper, ip);
7164}
7165
77fd5c15
SRRH
7166static struct ftrace_probe_ops snapshot_probe_ops = {
7167 .func = ftrace_snapshot,
7168 .print = ftrace_snapshot_print,
7169};
7170
7171static struct ftrace_probe_ops snapshot_count_probe_ops = {
7172 .func = ftrace_count_snapshot,
7173 .print = ftrace_snapshot_print,
1a93f8bd
SRV
7174 .init = ftrace_snapshot_init,
7175 .free = ftrace_snapshot_free,
77fd5c15
SRRH
7176};
7177
7178static int
04ec7bb6 7179ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
77fd5c15
SRRH
7180 char *glob, char *cmd, char *param, int enable)
7181{
7182 struct ftrace_probe_ops *ops;
7183 void *count = (void *)-1;
7184 char *number;
7185 int ret;
7186
0f179765
SRV
7187 if (!tr)
7188 return -ENODEV;
7189
77fd5c15
SRRH
7190 /* hash funcs only work with set_ftrace_filter */
7191 if (!enable)
7192 return -EINVAL;
7193
7194 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7195
d3d532d7 7196 if (glob[0] == '!')
7b60f3d8 7197 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
77fd5c15
SRRH
7198
7199 if (!param)
7200 goto out_reg;
7201
7202 number = strsep(&param, ":");
7203
7204 if (!strlen(number))
7205 goto out_reg;
7206
7207 /*
7208 * We use the callback data field (which is a pointer)
7209 * as our counter.
7210 */
7211 ret = kstrtoul(number, 0, (unsigned long *)&count);
7212 if (ret)
7213 return ret;
7214
7215 out_reg:
2824f503 7216 ret = tracing_alloc_snapshot_instance(tr);
df62db5b
SRV
7217 if (ret < 0)
7218 goto out;
77fd5c15 7219
4c174688 7220 ret = register_ftrace_function_probe(glob, tr, ops, count);
77fd5c15 7221
df62db5b 7222 out:
77fd5c15
SRRH
7223 return ret < 0 ? ret : 0;
7224}
7225
7226static struct ftrace_func_command ftrace_snapshot_cmd = {
7227 .name = "snapshot",
7228 .func = ftrace_trace_snapshot_callback,
7229};
7230
38de93ab 7231static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
7232{
7233 return register_ftrace_command(&ftrace_snapshot_cmd);
7234}
7235#else
38de93ab 7236static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 7237#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
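/*
 * Usage sketch (illustration, not part of trace.c): the "snapshot" command
 * registered above is reached by writing "<function>:snapshot[:count]" into
 * set_ftrace_filter, which ends up in ftrace_trace_snapshot_callback().
 * The tracefs path is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* take at most 5 snapshots when schedule() is hit */
	const char cmd[] = "schedule:snapshot:5";
	int fd = open("/sys/kernel/tracing/set_ftrace_filter", O_WRONLY);

	if (fd < 0) {
		perror("open set_ftrace_filter");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write snapshot cmd");
	close(fd);
	return 0;
}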
bc0c38d1 7238
7eeafbca 7239static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 7240{
8434dc93
SRRH
7241 if (WARN_ON(!tr->dir))
7242 return ERR_PTR(-ENODEV);
7243
7244 /* Top directory uses NULL as the parent */
7245 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7246 return NULL;
7247
7248 /* All sub buffers have a descriptor */
2b6080f2 7249 return tr->dir;
bc0c38d1
SR
7250}
7251
2b6080f2 7252static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 7253{
b04cc6b1
FW
7254 struct dentry *d_tracer;
7255
2b6080f2
SR
7256 if (tr->percpu_dir)
7257 return tr->percpu_dir;
b04cc6b1 7258
7eeafbca 7259 d_tracer = tracing_get_dentry(tr);
14a5ae40 7260 if (IS_ERR(d_tracer))
b04cc6b1
FW
7261 return NULL;
7262
8434dc93 7263 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 7264
2b6080f2 7265 WARN_ONCE(!tr->percpu_dir,
8434dc93 7266 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 7267
2b6080f2 7268 return tr->percpu_dir;
b04cc6b1
FW
7269}
7270
649e9c70
ON
7271static struct dentry *
7272trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7273 void *data, long cpu, const struct file_operations *fops)
7274{
7275 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7276
7277 if (ret) /* See tracing_get_cpu() */
7682c918 7278 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
7279 return ret;
7280}
7281
2b6080f2 7282static void
8434dc93 7283tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 7284{
2b6080f2 7285 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 7286 struct dentry *d_cpu;
dd49a38c 7287 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 7288
0a3d7ce7
NK
7289 if (!d_percpu)
7290 return;
7291
dd49a38c 7292 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 7293 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 7294 if (!d_cpu) {
a395d6a7 7295 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
7296 return;
7297 }
b04cc6b1 7298
8656e7a2 7299 /* per cpu trace_pipe */
649e9c70 7300 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 7301 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
7302
7303 /* per cpu trace */
649e9c70 7304 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 7305 tr, cpu, &tracing_fops);
7f96f93f 7306
649e9c70 7307 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 7308 tr, cpu, &tracing_buffers_fops);
7f96f93f 7309
649e9c70 7310 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 7311 tr, cpu, &tracing_stats_fops);
438ced17 7312
649e9c70 7313 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 7314 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
7315
7316#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 7317 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 7318 tr, cpu, &snapshot_fops);
6de58e62 7319
649e9c70 7320 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 7321 tr, cpu, &snapshot_raw_fops);
f1affcaa 7322#endif
b04cc6b1
FW
7323}
7324
60a11774
SR
7325#ifdef CONFIG_FTRACE_SELFTEST
7326/* Let selftest have access to static functions in this file */
7327#include "trace_selftest.c"
7328#endif
7329
577b785f
SR
7330static ssize_t
7331trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7332 loff_t *ppos)
7333{
7334 struct trace_option_dentry *topt = filp->private_data;
7335 char *buf;
7336
7337 if (topt->flags->val & topt->opt->bit)
7338 buf = "1\n";
7339 else
7340 buf = "0\n";
7341
7342 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7343}
7344
7345static ssize_t
7346trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7347 loff_t *ppos)
7348{
7349 struct trace_option_dentry *topt = filp->private_data;
7350 unsigned long val;
577b785f
SR
7351 int ret;
7352
22fe9b54
PH
7353 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7354 if (ret)
577b785f
SR
7355 return ret;
7356
8d18eaaf
LZ
7357 if (val != 0 && val != 1)
7358 return -EINVAL;
577b785f 7359
8d18eaaf 7360 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 7361 mutex_lock(&trace_types_lock);
8c1a49ae 7362 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 7363 topt->opt, !val);
577b785f
SR
7364 mutex_unlock(&trace_types_lock);
7365 if (ret)
7366 return ret;
577b785f
SR
7367 }
7368
7369 *ppos += cnt;
7370
7371 return cnt;
7372}
7373
7374
7375static const struct file_operations trace_options_fops = {
7376 .open = tracing_open_generic,
7377 .read = trace_options_read,
7378 .write = trace_options_write,
b444786f 7379 .llseek = generic_file_llseek,
577b785f
SR
7380};
7381
9a38a885
SRRH
7382/*
7383 * In order to pass in both the trace_array descriptor as well as the index
7384 * to the flag that the trace option file represents, the trace_array
7385 * has a character array of trace_flags_index[], which holds the index
7386 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7387 * The address of this character array is passed to the flag option file
7388 * read/write callbacks.
7389 *
7390 * In order to extract both the index and the trace_array descriptor,
7391 * get_tr_index() uses the following algorithm.
7392 *
7393 * idx = *ptr;
7394 *
7395 * As the pointer itself contains the address of the index (and
7396 * index[i] == i), dereferencing it yields the flag's bit index.
7397 *
7398 * Then, to get the trace_array descriptor, subtracting that index
7399 * from the ptr gets us to the start of the index array itself:
7400 *
7401 * ptr - idx == &index[0]
7402 *
7403 * Then a simple container_of() from that pointer gets us to the
7404 * trace_array descriptor.
7405 */
7406static void get_tr_index(void *data, struct trace_array **ptr,
7407 unsigned int *pindex)
7408{
7409 *pindex = *(unsigned char *)data;
7410
7411 *ptr = container_of(data - *pindex, struct trace_array,
7412 trace_flags_index);
7413}
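/*
 * Illustration (not part of trace.c): a standalone userspace model of the
 * pointer trick documented above. All names here (demo_array,
 * demo_get_index) are hypothetical; offsetof() arithmetic stands in for
 * container_of().
 */
#include <stddef.h>
#include <stdio.h>

struct demo_array {
	const char *name;
	unsigned char flags_index[32];	/* index[i] == i */
};

static void demo_get_index(void *data, struct demo_array **ptr,
			   unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;
	/* (char *)data - *pindex == &flags_index[0]; back up to the struct */
	*ptr = (struct demo_array *)((char *)data - *pindex -
				     offsetof(struct demo_array, flags_index));
}

int main(void)
{
	struct demo_array tr = { .name = "global" };
	struct demo_array *found;
	unsigned int idx, i;

	for (i = 0; i < 32; i++)
		tr.flags_index[i] = i;

	demo_get_index(&tr.flags_index[7], &found, &idx);
	/* prints: flag 7 belongs to trace array 'global' */
	printf("flag %u belongs to trace array '%s'\n", idx, found->name);
	return 0;
}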
7414
a8259075
SR
7415static ssize_t
7416trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7417 loff_t *ppos)
7418{
9a38a885
SRRH
7419 void *tr_index = filp->private_data;
7420 struct trace_array *tr;
7421 unsigned int index;
a8259075
SR
7422 char *buf;
7423
9a38a885
SRRH
7424 get_tr_index(tr_index, &tr, &index);
7425
7426 if (tr->trace_flags & (1 << index))
a8259075
SR
7427 buf = "1\n";
7428 else
7429 buf = "0\n";
7430
7431 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7432}
7433
7434static ssize_t
7435trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7436 loff_t *ppos)
7437{
9a38a885
SRRH
7438 void *tr_index = filp->private_data;
7439 struct trace_array *tr;
7440 unsigned int index;
a8259075
SR
7441 unsigned long val;
7442 int ret;
7443
9a38a885
SRRH
7444 get_tr_index(tr_index, &tr, &index);
7445
22fe9b54
PH
7446 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7447 if (ret)
a8259075
SR
7448 return ret;
7449
f2d84b65 7450 if (val != 0 && val != 1)
a8259075 7451 return -EINVAL;
69d34da2
SRRH
7452
7453 mutex_lock(&trace_types_lock);
2b6080f2 7454 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 7455 mutex_unlock(&trace_types_lock);
a8259075 7456
613f04a0
SRRH
7457 if (ret < 0)
7458 return ret;
7459
a8259075
SR
7460 *ppos += cnt;
7461
7462 return cnt;
7463}
7464
a8259075
SR
7465static const struct file_operations trace_options_core_fops = {
7466 .open = tracing_open_generic,
7467 .read = trace_options_core_read,
7468 .write = trace_options_core_write,
b444786f 7469 .llseek = generic_file_llseek,
a8259075
SR
7470};
7471
5452af66 7472struct dentry *trace_create_file(const char *name,
f4ae40a6 7473 umode_t mode,
5452af66
FW
7474 struct dentry *parent,
7475 void *data,
7476 const struct file_operations *fops)
7477{
7478 struct dentry *ret;
7479
8434dc93 7480 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 7481 if (!ret)
a395d6a7 7482 pr_warn("Could not create tracefs '%s' entry\n", name);
5452af66
FW
7483
7484 return ret;
7485}
7486
7487
2b6080f2 7488static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
7489{
7490 struct dentry *d_tracer;
a8259075 7491
2b6080f2
SR
7492 if (tr->options)
7493 return tr->options;
a8259075 7494
7eeafbca 7495 d_tracer = tracing_get_dentry(tr);
14a5ae40 7496 if (IS_ERR(d_tracer))
a8259075
SR
7497 return NULL;
7498
8434dc93 7499 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 7500 if (!tr->options) {
a395d6a7 7501 pr_warn("Could not create tracefs directory 'options'\n");
a8259075
SR
7502 return NULL;
7503 }
7504
2b6080f2 7505 return tr->options;
a8259075
SR
7506}
7507
577b785f 7508static void
2b6080f2
SR
7509create_trace_option_file(struct trace_array *tr,
7510 struct trace_option_dentry *topt,
577b785f
SR
7511 struct tracer_flags *flags,
7512 struct tracer_opt *opt)
7513{
7514 struct dentry *t_options;
577b785f 7515
2b6080f2 7516 t_options = trace_options_init_dentry(tr);
577b785f
SR
7517 if (!t_options)
7518 return;
7519
7520 topt->flags = flags;
7521 topt->opt = opt;
2b6080f2 7522 topt->tr = tr;
577b785f 7523
5452af66 7524 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
7525 &trace_options_fops);
7526
577b785f
SR
7527}
7528
37aea98b 7529static void
2b6080f2 7530create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
7531{
7532 struct trace_option_dentry *topts;
37aea98b 7533 struct trace_options *tr_topts;
577b785f
SR
7534 struct tracer_flags *flags;
7535 struct tracer_opt *opts;
7536 int cnt;
37aea98b 7537 int i;
577b785f
SR
7538
7539 if (!tracer)
37aea98b 7540 return;
577b785f
SR
7541
7542 flags = tracer->flags;
7543
7544 if (!flags || !flags->opts)
37aea98b
SRRH
7545 return;
7546
7547 /*
7548 * If this is an instance, only create flags for tracers
7549 * the instance may have.
7550 */
7551 if (!trace_ok_for_array(tracer, tr))
7552 return;
7553
7554 for (i = 0; i < tr->nr_topts; i++) {
d39cdd20
CH
7555 /* Make sure there are no duplicate flags. */
7556 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
37aea98b
SRRH
7557 return;
7558 }
577b785f
SR
7559
7560 opts = flags->opts;
7561
7562 for (cnt = 0; opts[cnt].name; cnt++)
7563 ;
7564
0cfe8245 7565 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f 7566 if (!topts)
37aea98b
SRRH
7567 return;
7568
7569 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7570 GFP_KERNEL);
7571 if (!tr_topts) {
7572 kfree(topts);
7573 return;
7574 }
7575
7576 tr->topts = tr_topts;
7577 tr->topts[tr->nr_topts].tracer = tracer;
7578 tr->topts[tr->nr_topts].topts = topts;
7579 tr->nr_topts++;
577b785f 7580
41d9c0be 7581 for (cnt = 0; opts[cnt].name; cnt++) {
2b6080f2 7582 create_trace_option_file(tr, &topts[cnt], flags,
577b785f 7583 &opts[cnt]);
41d9c0be
SRRH
7584 WARN_ONCE(topts[cnt].entry == NULL,
7585 "Failed to create trace option: %s",
7586 opts[cnt].name);
7587 }
577b785f
SR
7588}
7589
a8259075 7590static struct dentry *
2b6080f2
SR
7591create_trace_option_core_file(struct trace_array *tr,
7592 const char *option, long index)
a8259075
SR
7593{
7594 struct dentry *t_options;
a8259075 7595
2b6080f2 7596 t_options = trace_options_init_dentry(tr);
a8259075
SR
7597 if (!t_options)
7598 return NULL;
7599
9a38a885
SRRH
7600 return trace_create_file(option, 0644, t_options,
7601 (void *)&tr->trace_flags_index[index],
7602 &trace_options_core_fops);
a8259075
SR
7603}
7604
16270145 7605static void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
7606{
7607 struct dentry *t_options;
16270145 7608 bool top_level = tr == &global_trace;
a8259075
SR
7609 int i;
7610
2b6080f2 7611 t_options = trace_options_init_dentry(tr);
a8259075
SR
7612 if (!t_options)
7613 return;
7614
16270145
SRRH
7615 for (i = 0; trace_options[i]; i++) {
7616 if (top_level ||
7617 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7618 create_trace_option_core_file(tr, trace_options[i], i);
7619 }
a8259075
SR
7620}
7621
499e5470
SR
7622static ssize_t
7623rb_simple_read(struct file *filp, char __user *ubuf,
7624 size_t cnt, loff_t *ppos)
7625{
348f0fc2 7626 struct trace_array *tr = filp->private_data;
499e5470
SR
7627 char buf[64];
7628 int r;
7629
10246fa3 7630 r = tracer_tracing_is_on(tr);
499e5470
SR
7631 r = sprintf(buf, "%d\n", r);
7632
7633 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7634}
7635
7636static ssize_t
7637rb_simple_write(struct file *filp, const char __user *ubuf,
7638 size_t cnt, loff_t *ppos)
7639{
348f0fc2 7640 struct trace_array *tr = filp->private_data;
12883efb 7641 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
7642 unsigned long val;
7643 int ret;
7644
7645 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7646 if (ret)
7647 return ret;
7648
7649 if (buffer) {
2df8f8a6 7650 mutex_lock(&trace_types_lock);
f143641b
SRV
7651 if (!!val == tracer_tracing_is_on(tr)) {
7652 val = 0; /* do nothing */
7653 } else if (val) {
10246fa3 7654 tracer_tracing_on(tr);
2b6080f2
SR
7655 if (tr->current_trace->start)
7656 tr->current_trace->start(tr);
2df8f8a6 7657 } else {
10246fa3 7658 tracer_tracing_off(tr);
2b6080f2
SR
7659 if (tr->current_trace->stop)
7660 tr->current_trace->stop(tr);
2df8f8a6
SR
7661 }
7662 mutex_unlock(&trace_types_lock);
499e5470
SR
7663 }
7664
7665 (*ppos)++;
7666
7667 return cnt;
7668}
7669
7670static const struct file_operations rb_simple_fops = {
7b85af63 7671 .open = tracing_open_generic_tr,
499e5470
SR
7672 .read = rb_simple_read,
7673 .write = rb_simple_write,
7b85af63 7674 .release = tracing_release_generic_tr,
499e5470
SR
7675 .llseek = default_llseek,
7676};
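/*
 * Usage sketch (illustration, not part of trace.c): rb_simple_read() and
 * rb_simple_write() above back the tracing_on control file. The tracefs
 * mount point below is an assumption; older setups use
 * /sys/kernel/debug/tracing.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/tracing_on", O_WRONLY);

	if (fd < 0) {
		perror("open tracing_on");
		return 1;
	}
	/* "0" -> tracer_tracing_off() plus current_trace->stop(), as above */
	if (write(fd, "0", 1) != 1)
		perror("write tracing_on");
	close(fd);
	return 0;
}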
7677
03329f99
SRV
7678static ssize_t
7679buffer_percent_read(struct file *filp, char __user *ubuf,
7680 size_t cnt, loff_t *ppos)
7681{
7682 struct trace_array *tr = filp->private_data;
7683 char buf[64];
7684 int r;
7685
7686 r = tr->buffer_percent;
7687 r = sprintf(buf, "%d\n", r);
7688
7689 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7690}
7691
7692static ssize_t
7693buffer_percent_write(struct file *filp, const char __user *ubuf,
7694 size_t cnt, loff_t *ppos)
7695{
7696 struct trace_array *tr = filp->private_data;
7697 unsigned long val;
7698 int ret;
7699
7700 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7701 if (ret)
7702 return ret;
7703
7704 if (val > 100)
7705 return -EINVAL;
7706
7707 if (!val)
7708 val = 1;
7709
7710 tr->buffer_percent = val;
7711
7712 (*ppos)++;
7713
7714 return cnt;
7715}
7716
7717static const struct file_operations buffer_percent_fops = {
7718 .open = tracing_open_generic_tr,
7719 .read = buffer_percent_read,
7720 .write = buffer_percent_write,
7721 .release = tracing_release_generic_tr,
7722 .llseek = default_llseek,
7723};
7724
277ba044
SR
7725struct dentry *trace_instance_dir;
7726
7727static void
8434dc93 7728init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 7729
55034cd6
SRRH
7730static int
7731allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
7732{
7733 enum ring_buffer_flags rb_flags;
737223fb 7734
983f938a 7735 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
737223fb 7736
dced341b
SRRH
7737 buf->tr = tr;
7738
55034cd6
SRRH
7739 buf->buffer = ring_buffer_alloc(size, rb_flags);
7740 if (!buf->buffer)
7741 return -ENOMEM;
737223fb 7742
55034cd6
SRRH
7743 buf->data = alloc_percpu(struct trace_array_cpu);
7744 if (!buf->data) {
7745 ring_buffer_free(buf->buffer);
4397f045 7746 buf->buffer = NULL;
55034cd6
SRRH
7747 return -ENOMEM;
7748 }
737223fb 7749
737223fb
SRRH
7750 /* Allocate the first page for all buffers */
7751 set_buffer_entries(&tr->trace_buffer,
7752 ring_buffer_size(tr->trace_buffer.buffer, 0));
7753
55034cd6
SRRH
7754 return 0;
7755}
737223fb 7756
55034cd6
SRRH
7757static int allocate_trace_buffers(struct trace_array *tr, int size)
7758{
7759 int ret;
737223fb 7760
55034cd6
SRRH
7761 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7762 if (ret)
7763 return ret;
737223fb 7764
55034cd6
SRRH
7765#ifdef CONFIG_TRACER_MAX_TRACE
7766 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7767 allocate_snapshot ? size : 1);
7768 if (WARN_ON(ret)) {
737223fb 7769 ring_buffer_free(tr->trace_buffer.buffer);
24f2aaf9 7770 tr->trace_buffer.buffer = NULL;
55034cd6 7771 free_percpu(tr->trace_buffer.data);
24f2aaf9 7772 tr->trace_buffer.data = NULL;
55034cd6
SRRH
7773 return -ENOMEM;
7774 }
7775 tr->allocated_snapshot = allocate_snapshot;
737223fb 7776
55034cd6
SRRH
7777 /*
7778 * Only the top level trace array gets its snapshot allocated
7779 * from the kernel command line.
7780 */
7781 allocate_snapshot = false;
737223fb 7782#endif
55034cd6 7783 return 0;
737223fb
SRRH
7784}
7785
f0b70cc4
SRRH
7786static void free_trace_buffer(struct trace_buffer *buf)
7787{
7788 if (buf->buffer) {
7789 ring_buffer_free(buf->buffer);
7790 buf->buffer = NULL;
7791 free_percpu(buf->data);
7792 buf->data = NULL;
7793 }
7794}
7795
23aaa3c1
SRRH
7796static void free_trace_buffers(struct trace_array *tr)
7797{
7798 if (!tr)
7799 return;
7800
f0b70cc4 7801 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
7802
7803#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 7804 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
7805#endif
7806}
7807
9a38a885
SRRH
7808static void init_trace_flags_index(struct trace_array *tr)
7809{
7810 int i;
7811
7812 /* Used by the trace options files */
7813 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7814 tr->trace_flags_index[i] = i;
7815}
7816
37aea98b
SRRH
7817static void __update_tracer_options(struct trace_array *tr)
7818{
7819 struct tracer *t;
7820
7821 for (t = trace_types; t; t = t->next)
7822 add_tracer_options(tr, t);
7823}
7824
7825static void update_tracer_options(struct trace_array *tr)
7826{
7827 mutex_lock(&trace_types_lock);
7828 __update_tracer_options(tr);
7829 mutex_unlock(&trace_types_lock);
7830}
7831
eae47358 7832static int instance_mkdir(const char *name)
737223fb 7833{
277ba044
SR
7834 struct trace_array *tr;
7835 int ret;
277ba044 7836
12ecef0c 7837 mutex_lock(&event_mutex);
277ba044
SR
7838 mutex_lock(&trace_types_lock);
7839
7840 ret = -EEXIST;
7841 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7842 if (tr->name && strcmp(tr->name, name) == 0)
7843 goto out_unlock;
7844 }
7845
7846 ret = -ENOMEM;
7847 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7848 if (!tr)
7849 goto out_unlock;
7850
7851 tr->name = kstrdup(name, GFP_KERNEL);
7852 if (!tr->name)
7853 goto out_free_tr;
7854
ccfe9e42
AL
7855 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7856 goto out_free_tr;
7857
20550622 7858 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
983f938a 7859
ccfe9e42
AL
7860 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7861
277ba044
SR
7862 raw_spin_lock_init(&tr->start_lock);
7863
0b9b12c1
SRRH
7864 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7865
277ba044
SR
7866 tr->current_trace = &nop_trace;
7867
7868 INIT_LIST_HEAD(&tr->systems);
7869 INIT_LIST_HEAD(&tr->events);
067fe038 7870 INIT_LIST_HEAD(&tr->hist_vars);
277ba044 7871
737223fb 7872 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
7873 goto out_free_tr;
7874
8434dc93 7875 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
7876 if (!tr->dir)
7877 goto out_free_tr;
7878
7879 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 7880 if (ret) {
8434dc93 7881 tracefs_remove_recursive(tr->dir);
277ba044 7882 goto out_free_tr;
609e85a7 7883 }
277ba044 7884
04ec7bb6
SRV
7885 ftrace_init_trace_array(tr);
7886
8434dc93 7887 init_tracer_tracefs(tr, tr->dir);
9a38a885 7888 init_trace_flags_index(tr);
37aea98b 7889 __update_tracer_options(tr);
277ba044
SR
7890
7891 list_add(&tr->list, &ftrace_trace_arrays);
7892
7893 mutex_unlock(&trace_types_lock);
12ecef0c 7894 mutex_unlock(&event_mutex);
277ba044
SR
7895
7896 return 0;
7897
7898 out_free_tr:
23aaa3c1 7899 free_trace_buffers(tr);
ccfe9e42 7900 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
7901 kfree(tr->name);
7902 kfree(tr);
7903
7904 out_unlock:
7905 mutex_unlock(&trace_types_lock);
12ecef0c 7906 mutex_unlock(&event_mutex);
277ba044
SR
7907
7908 return ret;
7909
7910}
7911
eae47358 7912static int instance_rmdir(const char *name)
0c8916c3
SR
7913{
7914 struct trace_array *tr;
7915 int found = 0;
7916 int ret;
37aea98b 7917 int i;
0c8916c3 7918
12ecef0c 7919 mutex_lock(&event_mutex);
0c8916c3
SR
7920 mutex_lock(&trace_types_lock);
7921
7922 ret = -ENODEV;
7923 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7924 if (tr->name && strcmp(tr->name, name) == 0) {
7925 found = 1;
7926 break;
7927 }
7928 }
7929 if (!found)
7930 goto out_unlock;
7931
a695cb58 7932 ret = -EBUSY;
cf6ab6d9 7933 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
7934 goto out_unlock;
7935
0c8916c3
SR
7936 list_del(&tr->list);
7937
20550622
SRRH
7938 /* Disable all the flags that were enabled coming in */
7939 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7940 if ((1 << i) & ZEROED_TRACE_FLAGS)
7941 set_tracer_flag(tr, 1 << i, 0);
7942 }
7943
6b450d25 7944 tracing_set_nop(tr);
a0e6369e 7945 clear_ftrace_function_probes(tr);
0c8916c3 7946 event_trace_del_tracer(tr);
d879d0b8 7947 ftrace_clear_pids(tr);
591dffda 7948 ftrace_destroy_function_files(tr);
681a4a2f 7949 tracefs_remove_recursive(tr->dir);
a9fcaaac 7950 free_trace_buffers(tr);
0c8916c3 7951
37aea98b
SRRH
7952 for (i = 0; i < tr->nr_topts; i++) {
7953 kfree(tr->topts[i].topts);
7954 }
7955 kfree(tr->topts);
7956
db9108e0 7957 free_cpumask_var(tr->tracing_cpumask);
0c8916c3
SR
7958 kfree(tr->name);
7959 kfree(tr);
7960
7961 ret = 0;
7962
7963 out_unlock:
7964 mutex_unlock(&trace_types_lock);
12ecef0c 7965 mutex_unlock(&event_mutex);
0c8916c3
SR
7966
7967 return ret;
7968}
7969
277ba044
SR
7970static __init void create_trace_instances(struct dentry *d_tracer)
7971{
eae47358
SRRH
7972 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7973 instance_mkdir,
7974 instance_rmdir);
277ba044
SR
7975 if (WARN_ON(!trace_instance_dir))
7976 return;
277ba044
SR
7977}
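/*
 * Usage sketch (illustration, not part of trace.c): instance_mkdir() and
 * instance_rmdir() above run in response to ordinary directory operations
 * on the instances directory. The path is an assumption.
 */
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	/* triggers instance_mkdir("demo"): allocates a new trace_array */
	if (mkdir("/sys/kernel/tracing/instances/demo", 0755) != 0) {
		perror("mkdir instance");
		return 1;
	}
	/* triggers instance_rmdir("demo"): frees its buffers again */
	if (rmdir("/sys/kernel/tracing/instances/demo") != 0)
		perror("rmdir instance");
	return 0;
}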
7978
2b6080f2 7979static void
8434dc93 7980init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 7981{
3dd80953 7982 struct trace_event_file *file;
121aaee7 7983 int cpu;
2b6080f2 7984
607e2ea1
SRRH
7985 trace_create_file("available_tracers", 0444, d_tracer,
7986 tr, &show_traces_fops);
7987
7988 trace_create_file("current_tracer", 0644, d_tracer,
7989 tr, &set_tracer_fops);
7990
ccfe9e42
AL
7991 trace_create_file("tracing_cpumask", 0644, d_tracer,
7992 tr, &tracing_cpumask_fops);
7993
2b6080f2
SR
7994 trace_create_file("trace_options", 0644, d_tracer,
7995 tr, &tracing_iter_fops);
7996
7997 trace_create_file("trace", 0644, d_tracer,
6484c71c 7998 tr, &tracing_fops);
2b6080f2
SR
7999
8000 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 8001 tr, &tracing_pipe_fops);
2b6080f2
SR
8002
8003 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 8004 tr, &tracing_entries_fops);
2b6080f2
SR
8005
8006 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8007 tr, &tracing_total_entries_fops);
8008
238ae93d 8009 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
8010 tr, &tracing_free_buffer_fops);
8011
8012 trace_create_file("trace_marker", 0220, d_tracer,
8013 tr, &tracing_mark_fops);
8014
3dd80953
SRV
8015 file = __find_event_file(tr, "ftrace", "print");
8016 if (file && file->dir)
8017 trace_create_file("trigger", 0644, file->dir, file,
8018 &event_trigger_fops);
8019 tr->trace_marker_file = file;
8020
fa32e855
SR
8021 trace_create_file("trace_marker_raw", 0220, d_tracer,
8022 tr, &tracing_mark_raw_fops);
8023
2b6080f2
SR
8024 trace_create_file("trace_clock", 0644, d_tracer, tr,
8025 &trace_clock_fops);
8026
8027 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 8028 tr, &rb_simple_fops);
ce9bae55 8029
2c1ea60b
TZ
8030 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8031 &trace_time_stamp_mode_fops);
8032
a7b1d74e 8033 tr->buffer_percent = 50;
03329f99
SRV
8034
8035 trace_create_file("buffer_percent", 0444, d_tracer,
8036 tr, &buffer_percent_fops);
8037
16270145
SRRH
8038 create_trace_options_dir(tr);
8039
f971cc9a 8040#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6d9b3fa5
SRRH
8041 trace_create_file("tracing_max_latency", 0644, d_tracer,
8042 &tr->max_latency, &tracing_max_lat_fops);
8043#endif
8044
591dffda
SRRH
8045 if (ftrace_create_function_files(tr, d_tracer))
8046 WARN(1, "Could not allocate function filter files");
8047
ce9bae55
SRRH
8048#ifdef CONFIG_TRACER_SNAPSHOT
8049 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 8050 tr, &snapshot_fops);
ce9bae55 8051#endif
121aaee7
SRRH
8052
8053 for_each_tracing_cpu(cpu)
8434dc93 8054 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 8055
345ddcc8 8056 ftrace_init_tracefs(tr, d_tracer);
2b6080f2
SR
8057}
8058
93faccbb 8059static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
f76180bc
SRRH
8060{
8061 struct vfsmount *mnt;
8062 struct file_system_type *type;
8063
8064 /*
8065 * To maintain backward compatibility for tools that mount
8066 * debugfs to get to the tracing facility, tracefs is automatically
8067 * mounted to the debugfs/tracing directory.
8068 */
8069 type = get_fs_type("tracefs");
8070 if (!type)
8071 return NULL;
93faccbb 8072 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
f76180bc
SRRH
8073 put_filesystem(type);
8074 if (IS_ERR(mnt))
8075 return NULL;
8076 mntget(mnt);
8077
8078 return mnt;
8079}
8080
7eeafbca
SRRH
8081/**
8082 * tracing_init_dentry - initialize top level trace array
8083 *
8084 * This is called when creating files or directories in the tracing
8085 * directory. It is called via fs_initcall() by the boot up code
8086 * and expects to return the dentry of the top level tracing directory.
8087 */
8088struct dentry *tracing_init_dentry(void)
8089{
8090 struct trace_array *tr = &global_trace;
8091
f76180bc 8092 /* The top level trace array uses NULL as parent */
7eeafbca 8093 if (tr->dir)
f76180bc 8094 return NULL;
7eeafbca 8095
8b129199
JW
8096 if (WARN_ON(!tracefs_initialized()) ||
8097 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8098 WARN_ON(!debugfs_initialized())))
7eeafbca
SRRH
8099 return ERR_PTR(-ENODEV);
8100
f76180bc
SRRH
8101 /*
8102 * As there may still be users that expect the tracing
8103 * files to exist in debugfs/tracing, we must automount
8104 * the tracefs file system there, so older tools still
8105 * work with the newer kernel.
8106 */
8107 tr->dir = debugfs_create_automount("tracing", NULL,
8108 trace_automount, NULL);
7eeafbca
SRRH
8109 if (!tr->dir) {
8110 pr_warn_once("Could not create debugfs directory 'tracing'\n");
8111 return ERR_PTR(-ENOMEM);
8112 }
8113
8434dc93 8114 return NULL;
7eeafbca
SRRH
8115}
8116
00f4b652
JL
8117extern struct trace_eval_map *__start_ftrace_eval_maps[];
8118extern struct trace_eval_map *__stop_ftrace_eval_maps[];
0c564a53 8119
5f60b351 8120static void __init trace_eval_init(void)
0c564a53 8121{
3673b8e4
SRRH
8122 int len;
8123
02fd7f68 8124 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
f57a4143 8125 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
3673b8e4
SRRH
8126}
8127
8128#ifdef CONFIG_MODULES
f57a4143 8129static void trace_module_add_evals(struct module *mod)
3673b8e4 8130{
99be647c 8131 if (!mod->num_trace_evals)
3673b8e4
SRRH
8132 return;
8133
8134 /*
8135 * Modules with bad taint do not have events created, do
8136 * not bother with enums either.
8137 */
8138 if (trace_module_has_bad_taint(mod))
8139 return;
8140
f57a4143 8141 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
3673b8e4
SRRH
8142}
8143
681bec03 8144#ifdef CONFIG_TRACE_EVAL_MAP_FILE
f57a4143 8145static void trace_module_remove_evals(struct module *mod)
9828413d 8146{
23bf8cb8
JL
8147 union trace_eval_map_item *map;
8148 union trace_eval_map_item **last = &trace_eval_maps;
9828413d 8149
99be647c 8150 if (!mod->num_trace_evals)
9828413d
SRRH
8151 return;
8152
1793ed93 8153 mutex_lock(&trace_eval_mutex);
9828413d 8154
23bf8cb8 8155 map = trace_eval_maps;
9828413d
SRRH
8156
8157 while (map) {
8158 if (map->head.mod == mod)
8159 break;
5f60b351 8160 map = trace_eval_jmp_to_tail(map);
9828413d
SRRH
8161 last = &map->tail.next;
8162 map = map->tail.next;
8163 }
8164 if (!map)
8165 goto out;
8166
5f60b351 8167 *last = trace_eval_jmp_to_tail(map)->tail.next;
9828413d
SRRH
8168 kfree(map);
8169 out:
1793ed93 8170 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
8171}
8172#else
f57a4143 8173static inline void trace_module_remove_evals(struct module *mod) { }
681bec03 8174#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 8175
3673b8e4
SRRH
8176static int trace_module_notify(struct notifier_block *self,
8177 unsigned long val, void *data)
8178{
8179 struct module *mod = data;
8180
8181 switch (val) {
8182 case MODULE_STATE_COMING:
f57a4143 8183 trace_module_add_evals(mod);
3673b8e4 8184 break;
9828413d 8185 case MODULE_STATE_GOING:
f57a4143 8186 trace_module_remove_evals(mod);
9828413d 8187 break;
3673b8e4
SRRH
8188 }
8189
8190 return 0;
0c564a53
SRRH
8191}
8192
3673b8e4
SRRH
8193static struct notifier_block trace_module_nb = {
8194 .notifier_call = trace_module_notify,
8195 .priority = 0,
8196};
9828413d 8197#endif /* CONFIG_MODULES */
3673b8e4 8198
8434dc93 8199static __init int tracer_init_tracefs(void)
bc0c38d1
SR
8200{
8201 struct dentry *d_tracer;
bc0c38d1 8202
7e53bd42
LJ
8203 trace_access_lock_init();
8204
bc0c38d1 8205 d_tracer = tracing_init_dentry();
14a5ae40 8206 if (IS_ERR(d_tracer))
ed6f1c99 8207 return 0;
bc0c38d1 8208
58b92547
SRV
8209 event_trace_init();
8210
8434dc93 8211 init_tracer_tracefs(&global_trace, d_tracer);
501c2375 8212 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
bc0c38d1 8213
5452af66 8214 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 8215 &global_trace, &tracing_thresh_fops);
a8259075 8216
339ae5d3 8217 trace_create_file("README", 0444, d_tracer,
5452af66
FW
8218 NULL, &tracing_readme_fops);
8219
69abe6a5
AP
8220 trace_create_file("saved_cmdlines", 0444, d_tracer,
8221 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 8222
939c7a4f
YY
8223 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8224 NULL, &tracing_saved_cmdlines_size_fops);
8225
99c621d7
MS
8226 trace_create_file("saved_tgids", 0444, d_tracer,
8227 NULL, &tracing_saved_tgids_fops);
8228
5f60b351 8229 trace_eval_init();
0c564a53 8230
f57a4143 8231 trace_create_eval_file(d_tracer);
9828413d 8232
3673b8e4
SRRH
8233#ifdef CONFIG_MODULES
8234 register_module_notifier(&trace_module_nb);
8235#endif
8236
bc0c38d1 8237#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
8238 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8239 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 8240#endif
b04cc6b1 8241
277ba044 8242 create_trace_instances(d_tracer);
5452af66 8243
37aea98b 8244 update_tracer_options(&global_trace);
09d23a1d 8245
b5ad384e 8246 return 0;
bc0c38d1
SR
8247}
8248
3f5a54e3
SR
8249static int trace_panic_handler(struct notifier_block *this,
8250 unsigned long event, void *unused)
8251{
944ac425 8252 if (ftrace_dump_on_oops)
cecbca96 8253 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
8254 return NOTIFY_OK;
8255}
8256
8257static struct notifier_block trace_panic_notifier = {
8258 .notifier_call = trace_panic_handler,
8259 .next = NULL,
8260 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8261};
8262
8263static int trace_die_handler(struct notifier_block *self,
8264 unsigned long val,
8265 void *data)
8266{
8267 switch (val) {
8268 case DIE_OOPS:
944ac425 8269 if (ftrace_dump_on_oops)
cecbca96 8270 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
8271 break;
8272 default:
8273 break;
8274 }
8275 return NOTIFY_OK;
8276}
8277
8278static struct notifier_block trace_die_notifier = {
8279 .notifier_call = trace_die_handler,
8280 .priority = 200
8281};
8282
8283/*
8284 * printk is set to max of 1024, we really don't need it that big.
8285 * Nothing should be printing 1000 characters anyway.
8286 */
8287#define TRACE_MAX_PRINT 1000
8288
8289/*
8290 * Define here KERN_TRACE so that we have one place to modify
8291 * it if we decide to change what log level the ftrace dump
8292 * should be at.
8293 */
428aee14 8294#define KERN_TRACE KERN_EMERG
3f5a54e3 8295
955b61e5 8296void
3f5a54e3
SR
8297trace_printk_seq(struct trace_seq *s)
8298{
8299 /* Probably should print a warning here. */
3a161d99
SRRH
8300 if (s->seq.len >= TRACE_MAX_PRINT)
8301 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 8302
820b75f6
SRRH
8303 /*
8304 * More paranoid code. Although the buffer size is set to
8305 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8306 * an extra layer of protection.
8307 */
8308 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8309 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
8310
8311 /* should be zero terminated, but we are paranoid. */
3a161d99 8312 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
8313
8314 printk(KERN_TRACE "%s", s->buffer);
8315
f9520750 8316 trace_seq_init(s);
3f5a54e3
SR
8317}
8318
955b61e5
JW
8319void trace_init_global_iter(struct trace_iterator *iter)
8320{
8321 iter->tr = &global_trace;
2b6080f2 8322 iter->trace = iter->tr->current_trace;
ae3b5093 8323 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 8324 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
8325
8326 if (iter->trace && iter->trace->open)
8327 iter->trace->open(iter);
8328
8329 /* Annotate start of buffers if we had overruns */
8330 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8331 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8332
8333 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8334 if (trace_clocks[iter->tr->clock_id].in_ns)
8335 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
8336}
8337
7fe70b57 8338void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 8339{
3f5a54e3
SR
8340 /* use static because iter can be a bit big for the stack */
8341 static struct trace_iterator iter;
7fe70b57 8342 static atomic_t dump_running;
983f938a 8343 struct trace_array *tr = &global_trace;
cf586b61 8344 unsigned int old_userobj;
d769041f
SR
8345 unsigned long flags;
8346 int cnt = 0, cpu;
3f5a54e3 8347
7fe70b57
SRRH
8348 /* Only allow one dump user at a time. */
8349 if (atomic_inc_return(&dump_running) != 1) {
8350 atomic_dec(&dump_running);
8351 return;
8352 }
3f5a54e3 8353
7fe70b57
SRRH
8354 /*
8355 * Always turn off tracing when we dump.
8356 * We don't need to show trace output of what happens
8357 * between multiple crashes.
8358 *
8359 * If the user does a sysrq-z, then they can re-enable
8360 * tracing with echo 1 > tracing_on.
8361 */
0ee6b6cf 8362 tracing_off();
cf586b61 8363
7fe70b57 8364 local_irq_save(flags);
03fc7f9c 8365 printk_nmi_direct_enter();
3f5a54e3 8366
38dbe0b1 8367 /* Simulate the iterator */
955b61e5
JW
8368 trace_init_global_iter(&iter);
8369
d769041f 8370 for_each_tracing_cpu(cpu) {
5e2d5ef8 8371 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
d769041f
SR
8372 }
8373
983f938a 8374 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
cf586b61 8375
b54d3de9 8376 /* don't look at user memory in panic mode */
983f938a 8377 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
b54d3de9 8378
cecbca96
FW
8379 switch (oops_dump_mode) {
8380 case DUMP_ALL:
ae3b5093 8381 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
8382 break;
8383 case DUMP_ORIG:
8384 iter.cpu_file = raw_smp_processor_id();
8385 break;
8386 case DUMP_NONE:
8387 goto out_enable;
8388 default:
8389 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 8390 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
8391 }
8392
8393 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 8394
7fe70b57
SRRH
8395 /* Did function tracer already get disabled? */
8396 if (ftrace_is_dead()) {
8397 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8398 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8399 }
8400
3f5a54e3
SR
8401 /*
8402 * We need to stop all tracing on all CPUs to read
8403 * the next buffer. This is a bit expensive, but is
8404 * not done often. We fill all that we can read,
8405 * and then release the locks again.
8406 */
8407
3f5a54e3
SR
8408 while (!trace_empty(&iter)) {
8409
8410 if (!cnt)
8411 printk(KERN_TRACE "---------------------------------\n");
8412
8413 cnt++;
8414
8415 /* reset all but tr, trace, and overruns */
8416 memset(&iter.seq, 0,
8417 sizeof(struct trace_iterator) -
8418 offsetof(struct trace_iterator, seq));
8419 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8420 iter.pos = -1;
8421
955b61e5 8422 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
8423 int ret;
8424
8425 ret = print_trace_line(&iter);
8426 if (ret != TRACE_TYPE_NO_CONSUME)
8427 trace_consume(&iter);
3f5a54e3 8428 }
b892e5c8 8429 touch_nmi_watchdog();
3f5a54e3
SR
8430
8431 trace_printk_seq(&iter.seq);
8432 }
8433
8434 if (!cnt)
8435 printk(KERN_TRACE " (ftrace buffer empty)\n");
8436 else
8437 printk(KERN_TRACE "---------------------------------\n");
8438
cecbca96 8439 out_enable:
983f938a 8440 tr->trace_flags |= old_userobj;
cf586b61 8441
7fe70b57
SRRH
8442 for_each_tracing_cpu(cpu) {
8443 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 8444 }
03fc7f9c
PM
8445 atomic_dec(&dump_running);
8446 printk_nmi_direct_exit();
cd891ae0 8447 local_irq_restore(flags);
3f5a54e3 8448}
a8eecf22 8449EXPORT_SYMBOL_GPL(ftrace_dump);
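/*
 * Usage sketch (illustration, not part of trace.c): since ftrace_dump() is
 * exported GPL, a module may dump the ring buffer to the console itself,
 * e.g. from an error path. A minimal, assumed module skeleton:
 */
#include <linux/kernel.h>
#include <linux/module.h>

static int __init dump_demo_init(void)
{
	/* same as sysrq-z: dump every CPU's buffer via KERN_EMERG printk */
	ftrace_dump(DUMP_ALL);
	return 0;
}
module_init(dump_demo_init);

static void __exit dump_demo_exit(void) { }
module_exit(dump_demo_exit);

MODULE_LICENSE("GPL");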
cf586b61 8450
7e465baa
TZ
8451int trace_run_command(const char *buf, int (*createfn)(int, char **))
8452{
8453 char **argv;
8454 int argc, ret;
8455
8456 argc = 0;
8457 ret = 0;
8458 argv = argv_split(GFP_KERNEL, buf, &argc);
8459 if (!argv)
8460 return -ENOMEM;
8461
8462 if (argc)
8463 ret = createfn(argc, argv);
8464
8465 argv_free(argv);
8466
8467 return ret;
8468}
8469
8470#define WRITE_BUFSIZE 4096
8471
8472ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8473 size_t count, loff_t *ppos,
8474 int (*createfn)(int, char **))
8475{
8476 char *kbuf, *buf, *tmp;
8477 int ret = 0;
8478 size_t done = 0;
8479 size_t size;
8480
8481 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8482 if (!kbuf)
8483 return -ENOMEM;
8484
8485 while (done < count) {
8486 size = count - done;
8487
8488 if (size >= WRITE_BUFSIZE)
8489 size = WRITE_BUFSIZE - 1;
8490
8491 if (copy_from_user(kbuf, buffer + done, size)) {
8492 ret = -EFAULT;
8493 goto out;
8494 }
8495 kbuf[size] = '\0';
8496 buf = kbuf;
8497 do {
8498 tmp = strchr(buf, '\n');
8499 if (tmp) {
8500 *tmp = '\0';
8501 size = tmp - buf + 1;
8502 } else {
8503 size = strlen(buf);
8504 if (done + size < count) {
8505 if (buf != kbuf)
8506 break;
8507 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
8508 pr_warn("Line length is too long: Should be less than %d\n",
8509 WRITE_BUFSIZE - 2);
8510 ret = -EINVAL;
8511 goto out;
8512 }
8513 }
8514 done += size;
8515
8516 /* Remove comments */
8517 tmp = strchr(buf, '#');
8518
8519 if (tmp)
8520 *tmp = '\0';
8521
8522 ret = trace_run_command(buf, createfn);
8523 if (ret)
8524 goto out;
8525 buf += size;
8526
8527 } while (done < count);
8528 }
8529 ret = done;
8530
8531out:
8532 kfree(kbuf);
8533
8534 return ret;
8535}
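/*
 * Illustration (not part of trace.c): a condensed userspace model of the
 * parse loop above, splitting input on newlines, stripping '#' comments,
 * and tokenizing each command the way argv_split()/trace_run_command() do.
 * The kprobe_events-style strings are just sample input.
 */
#include <stdio.h>
#include <string.h>

static int run_command(char *buf)
{
	char *argv[16];
	int argc = 0;
	char *tok = strtok(buf, " \t");	/* stands in for argv_split() */

	while (tok && argc < 16) {
		argv[argc++] = tok;
		tok = strtok(NULL, " \t");
	}
	if (argc)
		printf("cmd '%s' with %d arg(s)\n", argv[0], argc - 1);
	return 0;
}

int main(void)
{
	char kbuf[] = "p:myprobe do_sys_open # a comment\n-:myprobe\n";
	char *buf = kbuf;

	do {
		char *nl = strchr(buf, '\n');
		char *tmp;

		if (nl)
			*nl = '\0';
		tmp = strchr(buf, '#');	/* Remove comments */
		if (tmp)
			*tmp = '\0';
		run_command(buf);
		buf = nl ? nl + 1 : NULL;
	} while (buf && *buf);
	return 0;
}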
8536
3928a8a2 8537__init static int tracer_alloc_buffers(void)
bc0c38d1 8538{
73c5162a 8539 int ring_buf_size;
9e01c1b7 8540 int ret = -ENOMEM;
4c11d7ae 8541
b5e87c05
SRRH
8542 /*
8543 * Make sure we don't accidentally add more trace options
8544 * than we have bits for.
8545 */
9a38a885 8546 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
b5e87c05 8547
9e01c1b7
RR
8548 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8549 goto out;
8550
ccfe9e42 8551 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 8552 goto out_free_buffer_mask;
4c11d7ae 8553
07d777fe
SR
8554 /* Only allocate trace_printk buffers if a trace_printk exists */
8555 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 8556 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
8557 trace_printk_init_buffers();
8558
73c5162a
SR
8559 /* To save memory, keep the ring buffer size to its minimum */
8560 if (ring_buffer_expanded)
8561 ring_buf_size = trace_buf_size;
8562 else
8563 ring_buf_size = 1;
8564
9e01c1b7 8565 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 8566 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 8567
2b6080f2
SR
8568 raw_spin_lock_init(&global_trace.start_lock);
8569
b32614c0
SAS
8570 /*
8571 * The prepare callbacks allocate some memory for the ring buffer. We
8572 * don't free the buffer if the CPU goes down. If we were to free
8573 * the buffer, then the user would lose any trace that was in the
8574 * buffer. The memory will be removed once the "instance" is removed.
8575 */
8576 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8577 "trace/RB:preapre", trace_rb_cpu_prepare,
8578 NULL);
8579 if (ret < 0)
8580 goto out_free_cpumask;
2c4a33ab 8581 /* Used for event triggers */
147d88e0 8582 ret = -ENOMEM;
2c4a33ab
SRRH
8583 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8584 if (!temp_buffer)
b32614c0 8585 goto out_rm_hp_state;
2c4a33ab 8586
939c7a4f
YY
8587 if (trace_create_savedcmd() < 0)
8588 goto out_free_temp_buffer;
8589
9e01c1b7 8590 /* TODO: make the number of buffers hot pluggable with CPUS */
737223fb 8591 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
8592 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8593 WARN_ON(1);
939c7a4f 8594 goto out_free_savedcmd;
4c11d7ae 8595 }
a7603ff4 8596
499e5470
SR
8597 if (global_trace.buffer_disabled)
8598 tracing_off();
4c11d7ae 8599
e1e232ca
SR
8600 if (trace_boot_clock) {
8601 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8602 if (ret < 0)
a395d6a7
JP
8603 pr_warn("Trace clock %s not defined, going back to default\n",
8604 trace_boot_clock);
e1e232ca
SR
8605 }
8606
ca164318
SRRH
8607 /*
8608 * register_tracer() might reference current_trace, so it
8609 * needs to be set before we register anything. This is
8610 * just a bootstrap of current_trace anyway.
8611 */
2b6080f2
SR
8612 global_trace.current_trace = &nop_trace;
8613
0b9b12c1
SRRH
8614 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8615
4104d326
SRRH
8616 ftrace_init_global_array_ops(&global_trace);
8617
9a38a885
SRRH
8618 init_trace_flags_index(&global_trace);
8619
ca164318
SRRH
8620 register_tracer(&nop_trace);
8621
dbeafd0d
SRV
8622 /* Function tracing may start here (via kernel command line) */
8623 init_function_trace();
8624
60a11774
SR
8625 /* All seems OK, enable tracing */
8626 tracing_disabled = 0;
3928a8a2 8627
3f5a54e3
SR
8628 atomic_notifier_chain_register(&panic_notifier_list,
8629 &trace_panic_notifier);
8630
8631 register_die_notifier(&trace_die_notifier);
2fc1dfbe 8632
ae63b31e
SR
8633 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8634
8635 INIT_LIST_HEAD(&global_trace.systems);
8636 INIT_LIST_HEAD(&global_trace.events);
067fe038 8637 INIT_LIST_HEAD(&global_trace.hist_vars);
ae63b31e
SR
8638 list_add(&global_trace.list, &ftrace_trace_arrays);
8639
a4d1e688 8640 apply_trace_boot_options();
7bcfaf54 8641
77fd5c15
SRRH
8642 register_snapshot_cmd();
8643
2fc1dfbe 8644 return 0;
3f5a54e3 8645
939c7a4f
YY
8646out_free_savedcmd:
8647 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
8648out_free_temp_buffer:
8649 ring_buffer_free(temp_buffer);
b32614c0
SAS
8650out_rm_hp_state:
8651 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9e01c1b7 8652out_free_cpumask:
ccfe9e42 8653 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
8654out_free_buffer_mask:
8655 free_cpumask_var(tracing_buffer_mask);
8656out:
8657 return ret;
bc0c38d1 8658}
b2821ae6 8659
e725c731 8660void __init early_trace_init(void)
5f893b26 8661{
0daa2302
SRRH
8662 if (tracepoint_printk) {
8663 tracepoint_print_iter =
8664 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8665 if (WARN_ON(!tracepoint_print_iter))
8666 tracepoint_printk = 0;
42391745
SRRH
8667 else
8668 static_key_enable(&tracepoint_printk_key.key);
0daa2302 8669 }
5f893b26 8670 tracer_alloc_buffers();
e725c731
SRV
8671}
8672
8673void __init trace_init(void)
8674{
0c564a53 8675 trace_event_init();
5f893b26
SRRH
8676}
8677
b2821ae6
SR
8678__init static int clear_boot_tracer(void)
8679{
8680 /*
8681 * The default bootup tracer name points into an init section.
8682 * This function is called at late init. If we did not
8683 * find the boot tracer, then clear it out, to prevent
8684 * later registration from accessing the buffer that is
8685 * about to be freed.
8686 */
8687 if (!default_bootup_tracer)
8688 return 0;
8689
8690 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8691 default_bootup_tracer);
8692 default_bootup_tracer = NULL;
8693
8694 return 0;
8695}
8696
8434dc93 8697fs_initcall(tracer_init_tracefs);
4bb0f0e7 8698late_initcall_sync(clear_boot_tracer);
3fd49c9e
CW
8699
8700#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
8701__init static int tracing_set_default_clock(void)
8702{
8703 /* sched_clock_stable() is determined in late_initcall */
5125eee4 8704 if (!trace_boot_clock && !sched_clock_stable()) {
3fd49c9e
CW
8705 printk(KERN_WARNING
8706 "Unstable clock detected, switching default tracing clock to \"global\"\n"
8707 "If you want to keep using the local clock, then add:\n"
8708 " \"trace_clock=local\"\n"
8709 "on the kernel command line\n");
8710 tracing_set_clock(&global_trace, "global");
8711 }
8712
8713 return 0;
8714}
8715late_initcall_sync(tracing_set_default_clock);
8716#endif