bcea3f96 1// SPDX-License-Identifier: GPL-2.0
bc0c38d1
SR
2/*
3 * ring buffer based function tracer
4 *
2b6080f2 5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
bc0c38d1
SR
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 *
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
6d49e352 13 * Copyright (C) 2004 Nadia Yvette Chambers
bc0c38d1 14 */
2cadf913 15#include <linux/ring_buffer.h>
273b281f 16#include <generated/utsrelease.h>
2cadf913
SR
17#include <linux/stacktrace.h>
18#include <linux/writeback.h>
bc0c38d1
SR
19#include <linux/kallsyms.h>
20#include <linux/seq_file.h>
3f5a54e3 21#include <linux/notifier.h>
2cadf913 22#include <linux/irqflags.h>
bc0c38d1 23#include <linux/debugfs.h>
8434dc93 24#include <linux/tracefs.h>
4c11d7ae 25#include <linux/pagemap.h>
bc0c38d1
SR
26#include <linux/hardirq.h>
27#include <linux/linkage.h>
28#include <linux/uaccess.h>
76c813e2 29#include <linux/vmalloc.h>
bc0c38d1
SR
30#include <linux/ftrace.h>
31#include <linux/module.h>
32#include <linux/percpu.h>
2cadf913 33#include <linux/splice.h>
3f5a54e3 34#include <linux/kdebug.h>
5f0c6c03 35#include <linux/string.h>
f76180bc 36#include <linux/mount.h>
7e53bd42 37#include <linux/rwsem.h>
5a0e3ad6 38#include <linux/slab.h>
bc0c38d1
SR
39#include <linux/ctype.h>
40#include <linux/init.h>
2a2cc8f7 41#include <linux/poll.h>
b892e5c8 42#include <linux/nmi.h>
bc0c38d1 43#include <linux/fs.h>
478409dd 44#include <linux/trace.h>
3fd49c9e 45#include <linux/sched/clock.h>
8bd75c77 46#include <linux/sched/rt.h>
86387f7e 47
bc0c38d1 48#include "trace.h"
f0868d1e 49#include "trace_output.h"
bc0c38d1 50
73c5162a
SR
51/*
52 * On boot up, the ring buffer is set to the minimum size, so that
53 * we do not waste memory on systems that are not using tracing.
54 */
55034cd6 55bool ring_buffer_expanded;
73c5162a 56
8e1b82e0
FW
57/*
58 * We need to change this state when a selftest is running.
ff32504f
FW
 59 * A selftest will look into the ring-buffer to count the
 60 * entries inserted during the selftest although some concurrent
5e1607a0 61 * insertions into the ring-buffer such as trace_printk could occur
ff32504f
FW
62 * at the same time, giving false positive or negative results.
63 */
8e1b82e0 64static bool __read_mostly tracing_selftest_running;
ff32504f 65
b2821ae6
SR
66/*
67 * If a tracer is running, we do not want to run SELFTEST.
68 */
020e5f85 69bool __read_mostly tracing_selftest_disabled;
b2821ae6 70
0daa2302
SRRH
71/* Pipe tracepoints to printk */
72struct trace_iterator *tracepoint_print_iter;
73int tracepoint_printk;
42391745 74static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
0daa2302 75
adf9f195
FW
76/* For tracers that don't implement custom flags */
77static struct tracer_opt dummy_tracer_opt[] = {
78 { }
79};
80
8c1a49ae
SRRH
81static int
82dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
adf9f195
FW
83{
84 return 0;
85}
0f048701 86
7ffbd48d
SR
87/*
88 * To prevent the comm cache from being overwritten when no
89 * tracing is active, only save the comm when a trace event
90 * occurred.
91 */
d914ba37 92static DEFINE_PER_CPU(bool, trace_taskinfo_save);
7ffbd48d 93
0f048701
SR
94/*
95 * Kill all tracing for good (never come back).
96 * It is initialized to 1 but will turn to zero if the initialization
97 * of the tracer is successful. But that is the only place that sets
98 * this back to zero.
99 */
4fd27358 100static int tracing_disabled = 1;
0f048701 101
955b61e5 102cpumask_var_t __read_mostly tracing_buffer_mask;
ab46428c 103
944ac425
SR
104/*
105 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
106 *
107 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
108 * is set, then ftrace_dump is called. This will output the contents
109 * of the ftrace buffers to the console. This is very useful for
 110 * capturing traces that lead to crashes and outputting them to a
111 * serial console.
112 *
113 * It is default off, but you can enable it with either specifying
114 * "ftrace_dump_on_oops" in the kernel command line, or setting
cecbca96
FW
115 * /proc/sys/kernel/ftrace_dump_on_oops
116 * Set 1 if you want to dump buffers of all CPUs
 117 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
944ac425 118 */
cecbca96
FW
119
120enum ftrace_dump_mode ftrace_dump_on_oops;
944ac425 121
de7edd31
SRRH
122/* When set, tracing will stop when a WARN*() is hit */
123int __disable_trace_on_warning;
124
681bec03
JL
125#ifdef CONFIG_TRACE_EVAL_MAP_FILE
126/* Map of enums to their values, for "eval_map" file */
23bf8cb8 127struct trace_eval_map_head {
9828413d
SRRH
128 struct module *mod;
129 unsigned long length;
130};
131
23bf8cb8 132union trace_eval_map_item;
9828413d 133
23bf8cb8 134struct trace_eval_map_tail {
9828413d
SRRH
135 /*
136 * "end" is first and points to NULL as it must be different
00f4b652 137 * than "mod" or "eval_string"
9828413d 138 */
23bf8cb8 139 union trace_eval_map_item *next;
9828413d
SRRH
140 const char *end; /* points to NULL */
141};
142
1793ed93 143static DEFINE_MUTEX(trace_eval_mutex);
9828413d
SRRH
144
145/*
23bf8cb8 146 * The trace_eval_maps are saved in an array with two extra elements,
9828413d
SRRH
147 * one at the beginning, and one at the end. The beginning item contains
148 * the count of the saved maps (head.length), and the module they
149 * belong to if not built in (head.mod). The ending item contains a
681bec03 150 * pointer to the next array of saved eval_map items.
9828413d 151 */
23bf8cb8 152union trace_eval_map_item {
00f4b652 153 struct trace_eval_map map;
23bf8cb8
JL
154 struct trace_eval_map_head head;
155 struct trace_eval_map_tail tail;
9828413d
SRRH
156};
157
23bf8cb8 158static union trace_eval_map_item *trace_eval_maps;
681bec03 159#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
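/*
 * Illustrative layout (a sketch derived from the comment above): for a
 * module that registers N eval maps, one saved block of the
 * trace_eval_maps array looks like
 *
 *   [0]        head: { .mod = owning module (or NULL if built in), .length = N }
 *   [1 .. N]   map:  the N trace_eval_map entries themselves
 *   [N + 1]    tail: { .next = next saved block (or NULL), .end = NULL }
 *
 * so a walker steps over the head, visits the maps, and follows
 * tail.next to the next block.
 */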
9828413d 160
607e2ea1 161static int tracing_set_tracer(struct trace_array *tr, const char *buf);
c438f140
TG
162static void ftrace_trace_userstack(struct ring_buffer *buffer,
163 unsigned long flags, int pc);
b2821ae6 164
ee6c2c1b
LZ
165#define MAX_TRACER_SIZE 100
166static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
b2821ae6 167static char *default_bootup_tracer;
d9e54076 168
55034cd6
SRRH
169static bool allocate_snapshot;
170
1beee96b 171static int __init set_cmdline_ftrace(char *str)
d9e54076 172{
67012ab1 173 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
b2821ae6 174 default_bootup_tracer = bootup_tracer_buf;
73c5162a 175 /* We are using ftrace early, expand it */
55034cd6 176 ring_buffer_expanded = true;
d9e54076
PZ
177 return 1;
178}
1beee96b 179__setup("ftrace=", set_cmdline_ftrace);
d9e54076 180
944ac425
SR
181static int __init set_ftrace_dump_on_oops(char *str)
182{
cecbca96
FW
183 if (*str++ != '=' || !*str) {
184 ftrace_dump_on_oops = DUMP_ALL;
185 return 1;
186 }
187
188 if (!strcmp("orig_cpu", str)) {
189 ftrace_dump_on_oops = DUMP_ORIG;
190 return 1;
191 }
192
193 return 0;
944ac425
SR
194}
195__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
60a11774 196
de7edd31
SRRH
197static int __init stop_trace_on_warning(char *str)
198{
933ff9f2
LCG
199 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
200 __disable_trace_on_warning = 1;
de7edd31
SRRH
201 return 1;
202}
933ff9f2 203__setup("traceoff_on_warning", stop_trace_on_warning);
de7edd31 204
3209cff4 205static int __init boot_alloc_snapshot(char *str)
55034cd6
SRRH
206{
207 allocate_snapshot = true;
208 /* We also need the main ring buffer expanded */
209 ring_buffer_expanded = true;
210 return 1;
211}
3209cff4 212__setup("alloc_snapshot", boot_alloc_snapshot);
55034cd6 213
7bcfaf54
SR
214
215static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
7bcfaf54
SR
216
217static int __init set_trace_boot_options(char *str)
218{
67012ab1 219 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
7bcfaf54
SR
220 return 0;
221}
222__setup("trace_options=", set_trace_boot_options);
223
e1e232ca
SR
224static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
225static char *trace_boot_clock __initdata;
226
227static int __init set_trace_boot_clock(char *str)
228{
229 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
230 trace_boot_clock = trace_boot_clock_buf;
231 return 0;
232}
233__setup("trace_clock=", set_trace_boot_clock);
234
0daa2302
SRRH
235static int __init set_tracepoint_printk(char *str)
236{
237 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
238 tracepoint_printk = 1;
239 return 1;
240}
241__setup("tp_printk", set_tracepoint_printk);
de7edd31 242
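/*
 * For illustration (a sketch, not taken from this file's documentation):
 * the __setup() handlers above correspond to kernel command line options
 * such as:
 *
 *   ftrace=function ftrace_dump_on_oops=orig_cpu traceoff_on_warning
 *   alloc_snapshot trace_options=stacktrace trace_clock=global tp_printk
 *
 * Each value after '=' is handed to the matching set_*() parser at boot,
 * before tracefs is available.
 */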
a5a1d1c2 243unsigned long long ns2usecs(u64 nsec)
bc0c38d1
SR
244{
245 nsec += 500;
246 do_div(nsec, 1000);
247 return nsec;
248}
249
983f938a
SRRH
250/* trace_flags holds trace_options default values */
251#define TRACE_DEFAULT_FLAGS \
252 (FUNCTION_DEFAULT_FLAGS | \
253 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
254 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
255 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
256 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
257
16270145
SRRH
258/* trace_options that are only supported by global_trace */
259#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
260 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
261
20550622
SRRH
262/* trace_flags that are default zero for instances */
263#define ZEROED_TRACE_FLAGS \
1e10486f 264 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
16270145 265
4fcdae83 266/*
67d04bb2
JF
267 * The global_trace is the descriptor that holds the top-level tracing
268 * buffers for the live tracing.
4fcdae83 269 */
983f938a
SRRH
270static struct trace_array global_trace = {
271 .trace_flags = TRACE_DEFAULT_FLAGS,
272};
bc0c38d1 273
ae63b31e 274LIST_HEAD(ftrace_trace_arrays);
bc0c38d1 275
ff451961
SRRH
276int trace_array_get(struct trace_array *this_tr)
277{
278 struct trace_array *tr;
279 int ret = -ENODEV;
280
281 mutex_lock(&trace_types_lock);
282 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
283 if (tr == this_tr) {
284 tr->ref++;
285 ret = 0;
286 break;
287 }
288 }
289 mutex_unlock(&trace_types_lock);
290
291 return ret;
292}
293
294static void __trace_array_put(struct trace_array *this_tr)
295{
296 WARN_ON(!this_tr->ref);
297 this_tr->ref--;
298}
299
300void trace_array_put(struct trace_array *this_tr)
301{
302 mutex_lock(&trace_types_lock);
303 __trace_array_put(this_tr);
304 mutex_unlock(&trace_types_lock);
305}
306
2425bcb9 307int call_filter_check_discard(struct trace_event_call *call, void *rec,
f306cc82
TZ
308 struct ring_buffer *buffer,
309 struct ring_buffer_event *event)
310{
311 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
312 !filter_match_preds(call->filter, rec)) {
0fc1b09f 313 __trace_event_discard_commit(buffer, event);
f306cc82
TZ
314 return 1;
315 }
316
317 return 0;
eb02ce01
TZ
318}
319
76c813e2
SRRH
320void trace_free_pid_list(struct trace_pid_list *pid_list)
321{
322 vfree(pid_list->pids);
323 kfree(pid_list);
324}
325
d8275c45
SR
326/**
327 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
328 * @filtered_pids: The list of pids to check
329 * @search_pid: The PID to find in @filtered_pids
330 *
 331 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
332 */
333bool
334trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
335{
336 /*
337 * If pid_max changed after filtered_pids was created, we
338 * by default ignore all pids greater than the previous pid_max.
339 */
340 if (search_pid >= filtered_pids->pid_max)
341 return false;
342
343 return test_bit(search_pid, filtered_pids->pids);
344}
345
346/**
347 * trace_ignore_this_task - should a task be ignored for tracing
348 * @filtered_pids: The list of pids to check
349 * @task: The task that should be ignored if not filtered
350 *
351 * Checks if @task should be traced or not from @filtered_pids.
352 * Returns true if @task should *NOT* be traced.
353 * Returns false if @task should be traced.
354 */
355bool
356trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
357{
358 /*
359 * Return false, because if filtered_pids does not exist,
360 * all pids are good to trace.
361 */
362 if (!filtered_pids)
363 return false;
364
365 return !trace_find_filtered_pid(filtered_pids, task->pid);
366}
367
368/**
5a93bae2 369 * trace_filter_add_remove_task - Add or remove a task from a pid_list
d8275c45
SR
370 * @pid_list: The list to modify
371 * @self: The current task for fork or NULL for exit
372 * @task: The task to add or remove
373 *
374 * If adding a task, if @self is defined, the task is only added if @self
375 * is also included in @pid_list. This happens on fork and tasks should
376 * only be added when the parent is listed. If @self is NULL, then the
377 * @task pid will be removed from the list, which would happen on exit
378 * of a task.
379 */
380void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
381 struct task_struct *self,
382 struct task_struct *task)
383{
384 if (!pid_list)
385 return;
386
387 /* For forks, we only add if the forking task is listed */
388 if (self) {
389 if (!trace_find_filtered_pid(pid_list, self->pid))
390 return;
391 }
392
393 /* Sorry, but we don't support pid_max changing after setting */
394 if (task->pid >= pid_list->pid_max)
395 return;
396
397 /* "self" is set for forks, and NULL for exits */
398 if (self)
399 set_bit(task->pid, pid_list->pids);
400 else
401 clear_bit(task->pid, pid_list->pids);
402}
403
5cc8976b
SRRH
404/**
405 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
406 * @pid_list: The pid list to show
407 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
408 * @pos: The position of the file
409 *
410 * This is used by the seq_file "next" operation to iterate the pids
411 * listed in a trace_pid_list structure.
412 *
413 * Returns the pid+1 as we want to display pid of zero, but NULL would
414 * stop the iteration.
415 */
416void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
417{
418 unsigned long pid = (unsigned long)v;
419
420 (*pos)++;
421
 422 /* pid already is +1 of the actual previous bit */
423 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
424
425 /* Return pid + 1 to allow zero to be represented */
426 if (pid < pid_list->pid_max)
427 return (void *)(pid + 1);
428
429 return NULL;
430}
431
432/**
433 * trace_pid_start - Used for seq_file to start reading pid lists
434 * @pid_list: The pid list to show
435 * @pos: The position of the file
436 *
437 * This is used by seq_file "start" operation to start the iteration
438 * of listing pids.
439 *
440 * Returns the pid+1 as we want to display pid of zero, but NULL would
441 * stop the iteration.
442 */
443void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
444{
445 unsigned long pid;
446 loff_t l = 0;
447
448 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
449 if (pid >= pid_list->pid_max)
450 return NULL;
451
452 /* Return pid + 1 so that zero can be the exit value */
453 for (pid++; pid && l < *pos;
454 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
455 ;
456 return (void *)pid;
457}
458
459/**
460 * trace_pid_show - show the current pid in seq_file processing
461 * @m: The seq_file structure to write into
462 * @v: A void pointer of the pid (+1) value to display
463 *
464 * Can be directly used by seq_file operations to display the current
465 * pid value.
466 */
467int trace_pid_show(struct seq_file *m, void *v)
468{
469 unsigned long pid = (unsigned long)v - 1;
470
471 seq_printf(m, "%lu\n", pid);
472 return 0;
473}
474
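/*
 * Illustrative sketch of how the three helpers above are typically wired
 * into a seq_file (the fpid_* names here are hypothetical; the real users
 * are the event and function pid filter files):
 */
#if 0	/* example only, not compiled */
static void *fpid_start(struct seq_file *m, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;	/* set at open time */

	return trace_pid_start(pid_list, pos);
}

static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;

	return trace_pid_next(pid_list, v, pos);
}

static void fpid_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations fpid_seq_ops = {
	.start	= fpid_start,
	.next	= fpid_next,
	.stop	= fpid_stop,
	.show	= trace_pid_show,
};
#endif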
76c813e2
SRRH
475/* 128 should be much more than enough */
476#define PID_BUF_SIZE 127
477
478int trace_pid_write(struct trace_pid_list *filtered_pids,
479 struct trace_pid_list **new_pid_list,
480 const char __user *ubuf, size_t cnt)
481{
482 struct trace_pid_list *pid_list;
483 struct trace_parser parser;
484 unsigned long val;
485 int nr_pids = 0;
486 ssize_t read = 0;
487 ssize_t ret = 0;
488 loff_t pos;
489 pid_t pid;
490
491 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
492 return -ENOMEM;
493
494 /*
495 * Always recreate a new array. The write is an all or nothing
496 * operation. Always create a new array when adding new pids by
497 * the user. If the operation fails, then the current list is
498 * not modified.
499 */
500 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
91862cc7
WW
501 if (!pid_list) {
502 trace_parser_put(&parser);
76c813e2 503 return -ENOMEM;
91862cc7 504 }
76c813e2
SRRH
505
506 pid_list->pid_max = READ_ONCE(pid_max);
507
508 /* Only truncating will shrink pid_max */
509 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
510 pid_list->pid_max = filtered_pids->pid_max;
511
512 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
513 if (!pid_list->pids) {
91862cc7 514 trace_parser_put(&parser);
76c813e2
SRRH
515 kfree(pid_list);
516 return -ENOMEM;
517 }
518
519 if (filtered_pids) {
520 /* copy the current bits to the new max */
67f20b08
WY
521 for_each_set_bit(pid, filtered_pids->pids,
522 filtered_pids->pid_max) {
76c813e2 523 set_bit(pid, pid_list->pids);
76c813e2
SRRH
524 nr_pids++;
525 }
526 }
527
528 while (cnt > 0) {
529
530 pos = 0;
531
532 ret = trace_get_user(&parser, ubuf, cnt, &pos);
533 if (ret < 0 || !trace_parser_loaded(&parser))
534 break;
535
536 read += ret;
537 ubuf += ret;
538 cnt -= ret;
539
76c813e2
SRRH
540 ret = -EINVAL;
541 if (kstrtoul(parser.buffer, 0, &val))
542 break;
543 if (val >= pid_list->pid_max)
544 break;
545
546 pid = (pid_t)val;
547
548 set_bit(pid, pid_list->pids);
549 nr_pids++;
550
551 trace_parser_clear(&parser);
552 ret = 0;
553 }
554 trace_parser_put(&parser);
555
556 if (ret < 0) {
557 trace_free_pid_list(pid_list);
558 return ret;
559 }
560
561 if (!nr_pids) {
562 /* Cleared the list of pids */
563 trace_free_pid_list(pid_list);
564 read = ret;
565 pid_list = NULL;
566 }
567
568 *new_pid_list = pid_list;
569
570 return read;
571}
572
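/*
 * For illustration: trace_pid_write() backs pid filter files such as
 * set_event_pid. From user space the all-or-nothing semantics look like
 * (a sketch; paths assume tracefs mounted at /sys/kernel/tracing):
 *
 *   # echo 123 456 > /sys/kernel/tracing/set_event_pid    (install a new list)
 *   # echo 789 >> /sys/kernel/tracing/set_event_pid       (append; old bits are copied)
 *   # echo > /sys/kernel/tracing/set_event_pid            (clear the list)
 *
 * A parse error part way through frees the new bitmap and leaves the
 * previously installed list untouched, since *new_pid_list is only set
 * on success.
 */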
a5a1d1c2 573static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
37886f6a
SR
574{
575 u64 ts;
576
577 /* Early boot up does not have a buffer yet */
9457158b 578 if (!buf->buffer)
37886f6a
SR
579 return trace_clock_local();
580
9457158b
AL
581 ts = ring_buffer_time_stamp(buf->buffer, cpu);
582 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
37886f6a
SR
583
584 return ts;
585}
bc0c38d1 586
a5a1d1c2 587u64 ftrace_now(int cpu)
9457158b
AL
588{
589 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
590}
591
10246fa3
SRRH
592/**
593 * tracing_is_enabled - Show if global_trace has been disabled
594 *
595 * Shows if the global trace has been enabled or not. It uses the
 596 * mirror flag "buffer_disabled" so it can be used in fast paths such as for
597 * the irqsoff tracer. But it may be inaccurate due to races. If you
598 * need to know the accurate state, use tracing_is_on() which is a little
599 * slower, but accurate.
600 */
9036990d
SR
601int tracing_is_enabled(void)
602{
10246fa3
SRRH
603 /*
604 * For quick access (irqsoff uses this in fast path), just
605 * return the mirror variable of the state of the ring buffer.
606 * It's a little racy, but we don't really care.
607 */
608 smp_rmb();
609 return !global_trace.buffer_disabled;
9036990d
SR
610}
611
4fcdae83 612/*
3928a8a2
SR
613 * trace_buf_size is the size in bytes that is allocated
614 * for a buffer. Note, the number of bytes is always rounded
615 * to page size.
3f5a54e3
SR
616 *
617 * This number is purposely set to a low number of 16384.
618 * If the dump on oops happens, it will be much appreciated
 619 * to not have to wait for all that output. Anyway, this can be
 620 * configured at boot time and at run time.
4fcdae83 621 */
3928a8a2 622#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
3f5a54e3 623
3928a8a2 624static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
bc0c38d1 625
4fcdae83 626/* trace_types holds a link list of available tracers. */
bc0c38d1 627static struct tracer *trace_types __read_mostly;
4fcdae83 628
4fcdae83
SR
629/*
630 * trace_types_lock is used to protect the trace_types list.
4fcdae83 631 */
a8227415 632DEFINE_MUTEX(trace_types_lock);
4fcdae83 633
7e53bd42
LJ
634/*
635 * serialize the access of the ring buffer
636 *
637 * ring buffer serializes readers, but it is low level protection.
 638 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 639 * is not protected by the ring buffer.
 640 *
 641 * The content of events may become garbage if we allow another process to consume
 642 * these events concurrently:
 643 * A) the page of the consumed events may become a normal page
 644 * (not reader page) in the ring buffer, and this page will be rewritten
 645 * by the events producer.
 646 * B) The page of the consumed events may become a page for splice_read,
 647 * and this page will be returned to the system.
 648 *
 649 * These primitives allow multiple processes to access different cpu ring buffers
 650 * concurrently.
 651 *
 652 * These primitives don't distinguish read-only and read-consume access.
 653 * Multiple read-only accesses are also serialized.
654 */
655
656#ifdef CONFIG_SMP
657static DECLARE_RWSEM(all_cpu_access_lock);
658static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
659
660static inline void trace_access_lock(int cpu)
661{
ae3b5093 662 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
663 /* gain it for accessing the whole ring buffer. */
664 down_write(&all_cpu_access_lock);
665 } else {
666 /* gain it for accessing a cpu ring buffer. */
667
ae3b5093 668 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
7e53bd42
LJ
669 down_read(&all_cpu_access_lock);
670
671 /* Secondly block other access to this @cpu ring buffer. */
672 mutex_lock(&per_cpu(cpu_access_lock, cpu));
673 }
674}
675
676static inline void trace_access_unlock(int cpu)
677{
ae3b5093 678 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
679 up_write(&all_cpu_access_lock);
680 } else {
681 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
682 up_read(&all_cpu_access_lock);
683 }
684}
685
686static inline void trace_access_lock_init(void)
687{
688 int cpu;
689
690 for_each_possible_cpu(cpu)
691 mutex_init(&per_cpu(cpu_access_lock, cpu));
692}
693
694#else
695
696static DEFINE_MUTEX(access_lock);
697
698static inline void trace_access_lock(int cpu)
699{
700 (void)cpu;
701 mutex_lock(&access_lock);
702}
703
704static inline void trace_access_unlock(int cpu)
705{
706 (void)cpu;
707 mutex_unlock(&access_lock);
708}
709
710static inline void trace_access_lock_init(void)
711{
712}
713
714#endif
715
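/*
 * Illustrative reader-side pattern for the locking scheme above (a sketch;
 * the real consumers are the trace_pipe and splice read paths):
 */
#if 0	/* example only, not compiled */
	trace_access_lock(cpu_file);	/* a cpu number or RING_BUFFER_ALL_CPUS */
	/* ... consume or peek at events for cpu_file ... */
	trace_access_unlock(cpu_file);
#endif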
d78a4614
SRRH
716#ifdef CONFIG_STACKTRACE
717static void __ftrace_trace_stack(struct ring_buffer *buffer,
718 unsigned long flags,
719 int skip, int pc, struct pt_regs *regs);
2d34f489
SRRH
720static inline void ftrace_trace_stack(struct trace_array *tr,
721 struct ring_buffer *buffer,
73dddbb5
SRRH
722 unsigned long flags,
723 int skip, int pc, struct pt_regs *regs);
ca475e83 724
d78a4614
SRRH
725#else
726static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
727 unsigned long flags,
728 int skip, int pc, struct pt_regs *regs)
729{
730}
2d34f489
SRRH
731static inline void ftrace_trace_stack(struct trace_array *tr,
732 struct ring_buffer *buffer,
73dddbb5
SRRH
733 unsigned long flags,
734 int skip, int pc, struct pt_regs *regs)
ca475e83
SRRH
735{
736}
737
d78a4614
SRRH
738#endif
739
3e9a8aad
SRRH
740static __always_inline void
741trace_event_setup(struct ring_buffer_event *event,
742 int type, unsigned long flags, int pc)
743{
744 struct trace_entry *ent = ring_buffer_event_data(event);
745
746 tracing_generic_entry_update(ent, flags, pc);
747 ent->type = type;
748}
749
750static __always_inline struct ring_buffer_event *
751__trace_buffer_lock_reserve(struct ring_buffer *buffer,
752 int type,
753 unsigned long len,
754 unsigned long flags, int pc)
755{
756 struct ring_buffer_event *event;
757
758 event = ring_buffer_lock_reserve(buffer, len);
759 if (event != NULL)
760 trace_event_setup(event, type, flags, pc);
761
762 return event;
763}
764
2290f2c5 765void tracer_tracing_on(struct trace_array *tr)
10246fa3
SRRH
766{
767 if (tr->trace_buffer.buffer)
768 ring_buffer_record_on(tr->trace_buffer.buffer);
769 /*
770 * This flag is looked at when buffers haven't been allocated
771 * yet, or by some tracers (like irqsoff), that just want to
772 * know if the ring buffer has been disabled, but it can handle
773 * races of where it gets disabled but we still do a record.
774 * As the check is in the fast path of the tracers, it is more
775 * important to be fast than accurate.
776 */
777 tr->buffer_disabled = 0;
778 /* Make the flag seen by readers */
779 smp_wmb();
780}
781
499e5470
SR
782/**
783 * tracing_on - enable tracing buffers
784 *
785 * This function enables tracing buffers that may have been
786 * disabled with tracing_off.
787 */
788void tracing_on(void)
789{
10246fa3 790 tracer_tracing_on(&global_trace);
499e5470
SR
791}
792EXPORT_SYMBOL_GPL(tracing_on);
793
52ffabe3
SRRH
794
795static __always_inline void
796__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
797{
d914ba37 798 __this_cpu_write(trace_taskinfo_save, true);
52ffabe3
SRRH
799
800 /* If this is the temp buffer, we need to commit fully */
801 if (this_cpu_read(trace_buffered_event) == event) {
802 /* Length is in event->array[0] */
803 ring_buffer_write(buffer, event->array[0], &event->array[1]);
804 /* Release the temp buffer */
805 this_cpu_dec(trace_buffered_event_cnt);
806 } else
807 ring_buffer_unlock_commit(buffer, event);
808}
809
09ae7234
SRRH
810/**
811 * __trace_puts - write a constant string into the trace buffer.
812 * @ip: The address of the caller
813 * @str: The constant string to write
814 * @size: The size of the string.
815 */
816int __trace_puts(unsigned long ip, const char *str, int size)
817{
818 struct ring_buffer_event *event;
819 struct ring_buffer *buffer;
820 struct print_entry *entry;
821 unsigned long irq_flags;
822 int alloc;
8abfb872
J
823 int pc;
824
983f938a 825 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
826 return 0;
827
8abfb872 828 pc = preempt_count();
09ae7234 829
3132e107
SRRH
830 if (unlikely(tracing_selftest_running || tracing_disabled))
831 return 0;
832
09ae7234
SRRH
833 alloc = sizeof(*entry) + size + 2; /* possible \n added */
834
835 local_save_flags(irq_flags);
836 buffer = global_trace.trace_buffer.buffer;
3e9a8aad
SRRH
837 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
838 irq_flags, pc);
09ae7234
SRRH
839 if (!event)
840 return 0;
841
842 entry = ring_buffer_event_data(event);
843 entry->ip = ip;
844
845 memcpy(&entry->buf, str, size);
846
847 /* Add a newline if necessary */
848 if (entry->buf[size - 1] != '\n') {
849 entry->buf[size] = '\n';
850 entry->buf[size + 1] = '\0';
851 } else
852 entry->buf[size] = '\0';
853
854 __buffer_unlock_commit(buffer, event);
2d34f489 855 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
856
857 return size;
858}
859EXPORT_SYMBOL_GPL(__trace_puts);
860
861/**
862 * __trace_bputs - write the pointer to a constant string into trace buffer
863 * @ip: The address of the caller
864 * @str: The constant string to write to the buffer to
865 */
866int __trace_bputs(unsigned long ip, const char *str)
867{
868 struct ring_buffer_event *event;
869 struct ring_buffer *buffer;
870 struct bputs_entry *entry;
871 unsigned long irq_flags;
872 int size = sizeof(struct bputs_entry);
8abfb872
J
873 int pc;
874
983f938a 875 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
876 return 0;
877
8abfb872 878 pc = preempt_count();
09ae7234 879
3132e107
SRRH
880 if (unlikely(tracing_selftest_running || tracing_disabled))
881 return 0;
882
09ae7234
SRRH
883 local_save_flags(irq_flags);
884 buffer = global_trace.trace_buffer.buffer;
3e9a8aad
SRRH
885 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
886 irq_flags, pc);
09ae7234
SRRH
887 if (!event)
888 return 0;
889
890 entry = ring_buffer_event_data(event);
891 entry->ip = ip;
892 entry->str = str;
893
894 __buffer_unlock_commit(buffer, event);
2d34f489 895 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
896
897 return 1;
898}
899EXPORT_SYMBOL_GPL(__trace_bputs);
900
ad909e21 901#ifdef CONFIG_TRACER_SNAPSHOT
a35873a0 902void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
ad909e21 903{
ad909e21
SRRH
904 struct tracer *tracer = tr->current_trace;
905 unsigned long flags;
906
1b22e382
SRRH
907 if (in_nmi()) {
908 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
909 internal_trace_puts("*** snapshot is being ignored ***\n");
910 return;
911 }
912
ad909e21 913 if (!tr->allocated_snapshot) {
ca268da6
SRRH
914 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
915 internal_trace_puts("*** stopping trace here! ***\n");
ad909e21
SRRH
916 tracing_off();
917 return;
918 }
919
920 /* Note, snapshot can not be used when the tracer uses it */
921 if (tracer->use_max_tr) {
ca268da6
SRRH
922 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
923 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
ad909e21
SRRH
924 return;
925 }
926
927 local_irq_save(flags);
a35873a0 928 update_max_tr(tr, current, smp_processor_id(), cond_data);
ad909e21
SRRH
929 local_irq_restore(flags);
930}
cab50379 931
a35873a0
TZ
932void tracing_snapshot_instance(struct trace_array *tr)
933{
934 tracing_snapshot_instance_cond(tr, NULL);
935}
936
cab50379 937/**
5a93bae2 938 * tracing_snapshot - take a snapshot of the current buffer.
cab50379
SRV
939 *
940 * This causes a swap between the snapshot buffer and the current live
941 * tracing buffer. You can use this to take snapshots of the live
942 * trace when some condition is triggered, but continue to trace.
943 *
944 * Note, make sure to allocate the snapshot with either
945 * a tracing_snapshot_alloc(), or by doing it manually
946 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
947 *
948 * If the snapshot buffer is not allocated, it will stop tracing.
949 * Basically making a permanent snapshot.
950 */
951void tracing_snapshot(void)
952{
953 struct trace_array *tr = &global_trace;
954
955 tracing_snapshot_instance(tr);
956}
1b22e382 957EXPORT_SYMBOL_GPL(tracing_snapshot);
ad909e21 958
a35873a0
TZ
959/**
960 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
961 * @tr: The tracing instance to snapshot
962 * @cond_data: The data to be tested conditionally, and possibly saved
963 *
964 * This is the same as tracing_snapshot() except that the snapshot is
965 * conditional - the snapshot will only happen if the
966 * cond_snapshot.update() implementation receiving the cond_data
967 * returns true, which means that the trace array's cond_snapshot
968 * update() operation used the cond_data to determine whether the
969 * snapshot should be taken, and if it was, presumably saved it along
970 * with the snapshot.
971 */
972void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
973{
974 tracing_snapshot_instance_cond(tr, cond_data);
975}
976EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
977
978/**
979 * tracing_snapshot_cond_data - get the user data associated with a snapshot
980 * @tr: The tracing instance
981 *
982 * When the user enables a conditional snapshot using
983 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
984 * with the snapshot. This accessor is used to retrieve it.
985 *
986 * Should not be called from cond_snapshot.update(), since it takes
987 * the tr->max_lock lock, which the code calling
988 * cond_snapshot.update() has already done.
989 *
990 * Returns the cond_data associated with the trace array's snapshot.
991 */
992void *tracing_cond_snapshot_data(struct trace_array *tr)
993{
994 void *cond_data = NULL;
995
996 arch_spin_lock(&tr->max_lock);
997
998 if (tr->cond_snapshot)
999 cond_data = tr->cond_snapshot->cond_data;
1000
1001 arch_spin_unlock(&tr->max_lock);
1002
1003 return cond_data;
1004}
1005EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1006
ad909e21
SRRH
1007static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
1008 struct trace_buffer *size_buf, int cpu_id);
3209cff4
SRRH
1009static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
1010
2824f503 1011int tracing_alloc_snapshot_instance(struct trace_array *tr)
3209cff4
SRRH
1012{
1013 int ret;
1014
1015 if (!tr->allocated_snapshot) {
1016
1017 /* allocate spare buffer */
1018 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1019 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
1020 if (ret < 0)
1021 return ret;
1022
1023 tr->allocated_snapshot = true;
1024 }
1025
1026 return 0;
1027}
1028
ad1438a0 1029static void free_snapshot(struct trace_array *tr)
3209cff4
SRRH
1030{
1031 /*
 1032 * We don't free the ring buffer. Instead, we resize it because
 1033 * the max_tr ring buffer has some state (e.g. ring->clock) and
 1034 * we want to preserve it.
1035 */
1036 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1037 set_buffer_entries(&tr->max_buffer, 1);
1038 tracing_reset_online_cpus(&tr->max_buffer);
1039 tr->allocated_snapshot = false;
1040}
ad909e21 1041
93e31ffb
TZ
1042/**
1043 * tracing_alloc_snapshot - allocate snapshot buffer.
1044 *
1045 * This only allocates the snapshot buffer if it isn't already
1046 * allocated - it doesn't also take a snapshot.
1047 *
1048 * This is meant to be used in cases where the snapshot buffer needs
1049 * to be set up for events that can't sleep but need to be able to
1050 * trigger a snapshot.
1051 */
1052int tracing_alloc_snapshot(void)
1053{
1054 struct trace_array *tr = &global_trace;
1055 int ret;
1056
2824f503 1057 ret = tracing_alloc_snapshot_instance(tr);
93e31ffb
TZ
1058 WARN_ON(ret < 0);
1059
1060 return ret;
1061}
1062EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1063
ad909e21 1064/**
5a93bae2 1065 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
ad909e21 1066 *
5a93bae2 1067 * This is similar to tracing_snapshot(), but it will allocate the
ad909e21
SRRH
1068 * snapshot buffer if it isn't already allocated. Use this only
1069 * where it is safe to sleep, as the allocation may sleep.
1070 *
1071 * This causes a swap between the snapshot buffer and the current live
1072 * tracing buffer. You can use this to take snapshots of the live
1073 * trace when some condition is triggered, but continue to trace.
1074 */
1075void tracing_snapshot_alloc(void)
1076{
ad909e21
SRRH
1077 int ret;
1078
93e31ffb
TZ
1079 ret = tracing_alloc_snapshot();
1080 if (ret < 0)
3209cff4 1081 return;
ad909e21
SRRH
1082
1083 tracing_snapshot();
1084}
1b22e382 1085EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
a35873a0
TZ
1086
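/*
 * Illustrative pairing of the two calls above (a sketch; the my_* names
 * are hypothetical):
 */
#if 0	/* example only, not compiled */
static int __init my_probe_init(void)
{
	/* May sleep: set up the spare buffer before any trigger can fire. */
	tracing_snapshot_alloc();
	return 0;
}

static void my_probe_hit(void)
{
	/* Safe from most contexts (but not NMI) once the buffer exists. */
	tracing_snapshot();
}
#endif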
1087/**
1088 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1089 * @tr: The tracing instance
1090 * @cond_data: User data to associate with the snapshot
1091 * @update: Implementation of the cond_snapshot update function
1092 *
1093 * Check whether the conditional snapshot for the given instance has
1094 * already been enabled, or if the current tracer is already using a
1095 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1096 * save the cond_data and update function inside.
1097 *
1098 * Returns 0 if successful, error otherwise.
1099 */
1100int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1101 cond_update_fn_t update)
1102{
1103 struct cond_snapshot *cond_snapshot;
1104 int ret = 0;
1105
1106 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1107 if (!cond_snapshot)
1108 return -ENOMEM;
1109
1110 cond_snapshot->cond_data = cond_data;
1111 cond_snapshot->update = update;
1112
1113 mutex_lock(&trace_types_lock);
1114
1115 ret = tracing_alloc_snapshot_instance(tr);
1116 if (ret)
1117 goto fail_unlock;
1118
1119 if (tr->current_trace->use_max_tr) {
1120 ret = -EBUSY;
1121 goto fail_unlock;
1122 }
1123
1c347a94
SRV
1124 /*
1125 * The cond_snapshot can only change to NULL without the
1126 * trace_types_lock. We don't care if we race with it going
1127 * to NULL, but we want to make sure that it's not set to
1128 * something other than NULL when we get here, which we can
1129 * do safely with only holding the trace_types_lock and not
1130 * having to take the max_lock.
1131 */
a35873a0
TZ
1132 if (tr->cond_snapshot) {
1133 ret = -EBUSY;
1134 goto fail_unlock;
1135 }
1136
1137 arch_spin_lock(&tr->max_lock);
1138 tr->cond_snapshot = cond_snapshot;
1139 arch_spin_unlock(&tr->max_lock);
1140
1141 mutex_unlock(&trace_types_lock);
1142
1143 return ret;
1144
1145 fail_unlock:
1146 mutex_unlock(&trace_types_lock);
1147 kfree(cond_snapshot);
1148 return ret;
1149}
1150EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1151
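/*
 * Illustrative cond_snapshot update callback (a sketch; struct my_cond and
 * the my_* names are hypothetical):
 */
#if 0	/* example only, not compiled */
struct my_cond {
	u64 threshold;
	u64 seen;
};

static bool my_update(struct trace_array *tr, void *cond_data)
{
	struct my_cond *cond = cond_data;

	/* Snapshot only when the tracked value crosses the threshold. */
	return cond->seen > cond->threshold;
}

static int my_enable(struct trace_array *tr, struct my_cond *cond)
{
	/* Process context: may allocate the snapshot buffer. */
	return tracing_snapshot_cond_enable(tr, cond, my_update);
}
#endif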
1152/**
1153 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1154 * @tr: The tracing instance
1155 *
1156 * Check whether the conditional snapshot for the given instance is
1157 * enabled; if so, free the cond_snapshot associated with it,
1158 * otherwise return -EINVAL.
1159 *
1160 * Returns 0 if successful, error otherwise.
1161 */
1162int tracing_snapshot_cond_disable(struct trace_array *tr)
1163{
1164 int ret = 0;
1165
1166 arch_spin_lock(&tr->max_lock);
1167
1168 if (!tr->cond_snapshot)
1169 ret = -EINVAL;
1170 else {
1171 kfree(tr->cond_snapshot);
1172 tr->cond_snapshot = NULL;
1173 }
1174
1175 arch_spin_unlock(&tr->max_lock);
1176
1177 return ret;
1178}
1179EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
ad909e21
SRRH
1180#else
1181void tracing_snapshot(void)
1182{
1183 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1184}
1b22e382 1185EXPORT_SYMBOL_GPL(tracing_snapshot);
a35873a0
TZ
1186void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1187{
1188 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1189}
1190EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
93e31ffb
TZ
1191int tracing_alloc_snapshot(void)
1192{
1193 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1194 return -ENODEV;
1195}
1196EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
ad909e21
SRRH
1197void tracing_snapshot_alloc(void)
1198{
1199 /* Give warning */
1200 tracing_snapshot();
1201}
1b22e382 1202EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
a35873a0
TZ
1203void *tracing_cond_snapshot_data(struct trace_array *tr)
1204{
1205 return NULL;
1206}
1207EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1208int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1209{
1210 return -ENODEV;
1211}
1212EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1213int tracing_snapshot_cond_disable(struct trace_array *tr)
1214{
1215 return false;
1216}
1217EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
ad909e21
SRRH
1218#endif /* CONFIG_TRACER_SNAPSHOT */
1219
2290f2c5 1220void tracer_tracing_off(struct trace_array *tr)
10246fa3
SRRH
1221{
1222 if (tr->trace_buffer.buffer)
1223 ring_buffer_record_off(tr->trace_buffer.buffer);
1224 /*
1225 * This flag is looked at when buffers haven't been allocated
1226 * yet, or by some tracers (like irqsoff), that just want to
1227 * know if the ring buffer has been disabled, but it can handle
1228 * races of where it gets disabled but we still do a record.
1229 * As the check is in the fast path of the tracers, it is more
1230 * important to be fast than accurate.
1231 */
1232 tr->buffer_disabled = 1;
1233 /* Make the flag seen by readers */
1234 smp_wmb();
1235}
1236
499e5470
SR
1237/**
1238 * tracing_off - turn off tracing buffers
1239 *
1240 * This function stops the tracing buffers from recording data.
1241 * It does not disable any overhead the tracers themselves may
1242 * be causing. This function simply causes all recording to
1243 * the ring buffers to fail.
1244 */
1245void tracing_off(void)
1246{
10246fa3 1247 tracer_tracing_off(&global_trace);
499e5470
SR
1248}
1249EXPORT_SYMBOL_GPL(tracing_off);
1250
de7edd31
SRRH
1251void disable_trace_on_warning(void)
1252{
1253 if (__disable_trace_on_warning)
1254 tracing_off();
1255}
1256
10246fa3
SRRH
1257/**
1258 * tracer_tracing_is_on - show real state of ring buffer enabled
1259 * @tr : the trace array to know if ring buffer is enabled
1260 *
1261 * Shows real state of the ring buffer if it is enabled or not.
1262 */
ec573508 1263bool tracer_tracing_is_on(struct trace_array *tr)
10246fa3
SRRH
1264{
1265 if (tr->trace_buffer.buffer)
1266 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1267 return !tr->buffer_disabled;
1268}
1269
499e5470
SR
1270/**
1271 * tracing_is_on - show state of ring buffers enabled
1272 */
1273int tracing_is_on(void)
1274{
10246fa3 1275 return tracer_tracing_is_on(&global_trace);
499e5470
SR
1276}
1277EXPORT_SYMBOL_GPL(tracing_is_on);
1278
3928a8a2 1279static int __init set_buf_size(char *str)
bc0c38d1 1280{
3928a8a2 1281 unsigned long buf_size;
c6caeeb1 1282
bc0c38d1
SR
1283 if (!str)
1284 return 0;
9d612bef 1285 buf_size = memparse(str, &str);
c6caeeb1 1286 /* nr_entries can not be zero */
9d612bef 1287 if (buf_size == 0)
c6caeeb1 1288 return 0;
3928a8a2 1289 trace_buf_size = buf_size;
bc0c38d1
SR
1290 return 1;
1291}
3928a8a2 1292__setup("trace_buf_size=", set_buf_size);
bc0c38d1 1293
0e950173
TB
1294static int __init set_tracing_thresh(char *str)
1295{
87abb3b1 1296 unsigned long threshold;
0e950173
TB
1297 int ret;
1298
1299 if (!str)
1300 return 0;
bcd83ea6 1301 ret = kstrtoul(str, 0, &threshold);
0e950173
TB
1302 if (ret < 0)
1303 return 0;
87abb3b1 1304 tracing_thresh = threshold * 1000;
0e950173
TB
1305 return 1;
1306}
1307__setup("tracing_thresh=", set_tracing_thresh);
1308
57f50be1
SR
1309unsigned long nsecs_to_usecs(unsigned long nsecs)
1310{
1311 return nsecs / 1000;
1312}
1313
a3418a36
SRRH
1314/*
1315 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
f57a4143 1316 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
a3418a36 1317 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
f57a4143 1318 * of strings in the order that the evals (enum) were defined.
a3418a36
SRRH
1319 */
1320#undef C
1321#define C(a, b) b
1322
4fcdae83 1323/* These must match the bit positions in trace_iterator_flags */
bc0c38d1 1324static const char *trace_options[] = {
a3418a36 1325 TRACE_FLAGS
bc0c38d1
SR
1326 NULL
1327};
1328
5079f326
Z
1329static struct {
1330 u64 (*func)(void);
1331 const char *name;
8be0709f 1332 int in_ns; /* is this clock in nanoseconds? */
5079f326 1333} trace_clocks[] = {
1b3e5c09
TG
1334 { trace_clock_local, "local", 1 },
1335 { trace_clock_global, "global", 1 },
1336 { trace_clock_counter, "counter", 0 },
e7fda6c4 1337 { trace_clock_jiffies, "uptime", 0 },
1b3e5c09
TG
1338 { trace_clock, "perf", 1 },
1339 { ktime_get_mono_fast_ns, "mono", 1 },
aabfa5f2 1340 { ktime_get_raw_fast_ns, "mono_raw", 1 },
a3ed0e43 1341 { ktime_get_boot_fast_ns, "boot", 1 },
8cbd9cc6 1342 ARCH_TRACE_CLOCKS
5079f326
Z
1343};
1344
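/*
 * For illustration: the active clock is selected through the trace_clock
 * tracefs file (a sketch; the bracketed name is the current selection):
 *
 *   # cat /sys/kernel/tracing/trace_clock
 *   [local] global counter uptime perf mono mono_raw boot
 *   # echo global > /sys/kernel/tracing/trace_clock
 *
 * or at boot with trace_clock=<name>, handled by set_trace_boot_clock().
 */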
860f9f6b
TZ
1345bool trace_clock_in_ns(struct trace_array *tr)
1346{
1347 if (trace_clocks[tr->clock_id].in_ns)
1348 return true;
1349
1350 return false;
1351}
1352
b63f39ea 1353/*
1354 * trace_parser_get_init - gets the buffer for trace parser
1355 */
1356int trace_parser_get_init(struct trace_parser *parser, int size)
1357{
1358 memset(parser, 0, sizeof(*parser));
1359
1360 parser->buffer = kmalloc(size, GFP_KERNEL);
1361 if (!parser->buffer)
1362 return 1;
1363
1364 parser->size = size;
1365 return 0;
1366}
1367
1368/*
1369 * trace_parser_put - frees the buffer for trace parser
1370 */
1371void trace_parser_put(struct trace_parser *parser)
1372{
1373 kfree(parser->buffer);
0e684b65 1374 parser->buffer = NULL;
b63f39ea 1375}
1376
1377/*
1378 * trace_get_user - reads the user input string separated by space
1379 * (matched by isspace(ch))
1380 *
1381 * For each string found the 'struct trace_parser' is updated,
1382 * and the function returns.
1383 *
1384 * Returns number of bytes read.
1385 *
1386 * See kernel/trace/trace.h for 'struct trace_parser' details.
1387 */
1388int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1389 size_t cnt, loff_t *ppos)
1390{
1391 char ch;
1392 size_t read = 0;
1393 ssize_t ret;
1394
1395 if (!*ppos)
1396 trace_parser_clear(parser);
1397
1398 ret = get_user(ch, ubuf++);
1399 if (ret)
1400 goto out;
1401
1402 read++;
1403 cnt--;
1404
1405 /*
1406 * The parser is not finished with the last write,
1407 * continue reading the user input without skipping spaces.
1408 */
1409 if (!parser->cont) {
1410 /* skip white space */
1411 while (cnt && isspace(ch)) {
1412 ret = get_user(ch, ubuf++);
1413 if (ret)
1414 goto out;
1415 read++;
1416 cnt--;
1417 }
1418
76638d96
CD
1419 parser->idx = 0;
1420
b63f39ea 1421 /* only spaces were written */
921a7acd 1422 if (isspace(ch) || !ch) {
b63f39ea 1423 *ppos += read;
1424 ret = read;
1425 goto out;
1426 }
b63f39ea 1427 }
1428
1429 /* read the non-space input */
921a7acd 1430 while (cnt && !isspace(ch) && ch) {
3c235a33 1431 if (parser->idx < parser->size - 1)
b63f39ea 1432 parser->buffer[parser->idx++] = ch;
1433 else {
1434 ret = -EINVAL;
1435 goto out;
1436 }
1437 ret = get_user(ch, ubuf++);
1438 if (ret)
1439 goto out;
1440 read++;
1441 cnt--;
1442 }
1443
1444 /* We either got finished input or we have to wait for another call. */
921a7acd 1445 if (isspace(ch) || !ch) {
b63f39ea 1446 parser->buffer[parser->idx] = 0;
1447 parser->cont = false;
057db848 1448 } else if (parser->idx < parser->size - 1) {
b63f39ea 1449 parser->cont = true;
1450 parser->buffer[parser->idx++] = ch;
f4d0706c
CD
1451 /* Make sure the parsed string always terminates with '\0'. */
1452 parser->buffer[parser->idx] = 0;
057db848
SR
1453 } else {
1454 ret = -EINVAL;
1455 goto out;
b63f39ea 1456 }
1457
1458 *ppos += read;
1459 ret = read;
1460
1461out:
1462 return ret;
1463}
1464
3a161d99 1465/* TODO add a seq_buf_to_buffer() */
b8b94265 1466static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
3c56819b
EGM
1467{
1468 int len;
3c56819b 1469
5ac48378 1470 if (trace_seq_used(s) <= s->seq.readpos)
3c56819b
EGM
1471 return -EBUSY;
1472
5ac48378 1473 len = trace_seq_used(s) - s->seq.readpos;
3c56819b
EGM
1474 if (cnt > len)
1475 cnt = len;
3a161d99 1476 memcpy(buf, s->buffer + s->seq.readpos, cnt);
3c56819b 1477
3a161d99 1478 s->seq.readpos += cnt;
3c56819b
EGM
1479 return cnt;
1480}
1481
0e950173
TB
1482unsigned long __read_mostly tracing_thresh;
1483
5d4a9dba 1484#ifdef CONFIG_TRACER_MAX_TRACE
5d4a9dba
SR
1485/*
1486 * Copy the new maximum trace into the separate maximum-trace
1487 * structure. (this way the maximum trace is permanently saved,
5a93bae2 1488 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
5d4a9dba
SR
1489 */
1490static void
1491__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1492{
12883efb
SRRH
1493 struct trace_buffer *trace_buf = &tr->trace_buffer;
1494 struct trace_buffer *max_buf = &tr->max_buffer;
1495 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1496 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
5d4a9dba 1497
12883efb
SRRH
1498 max_buf->cpu = cpu;
1499 max_buf->time_start = data->preempt_timestamp;
5d4a9dba 1500
6d9b3fa5 1501 max_data->saved_latency = tr->max_latency;
8248ac05
SR
1502 max_data->critical_start = data->critical_start;
1503 max_data->critical_end = data->critical_end;
5d4a9dba 1504
85f726a3 1505 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
8248ac05 1506 max_data->pid = tsk->pid;
f17a5194
SRRH
1507 /*
1508 * If tsk == current, then use current_uid(), as that does not use
1509 * RCU. The irq tracer can be called out of RCU scope.
1510 */
1511 if (tsk == current)
1512 max_data->uid = current_uid();
1513 else
1514 max_data->uid = task_uid(tsk);
1515
8248ac05
SR
1516 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1517 max_data->policy = tsk->policy;
1518 max_data->rt_priority = tsk->rt_priority;
5d4a9dba
SR
1519
 1520 /* record this task's comm */
1521 tracing_record_cmdline(tsk);
1522}
1523
4fcdae83
SR
1524/**
1525 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1526 * @tr: tracer
1527 * @tsk: the task with the latency
1528 * @cpu: The cpu that initiated the trace.
a35873a0 1529 * @cond_data: User data associated with a conditional snapshot
4fcdae83
SR
1530 *
1531 * Flip the buffers between the @tr and the max_tr and record information
1532 * about which task was the cause of this latency.
1533 */
e309b41d 1534void
a35873a0
TZ
1535update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1536 void *cond_data)
bc0c38d1 1537{
2b6080f2 1538 if (tr->stop_count)
b8de7bd1
SR
1539 return;
1540
4c11d7ae 1541 WARN_ON_ONCE(!irqs_disabled());
34600f0e 1542
45ad21ca 1543 if (!tr->allocated_snapshot) {
debdd57f 1544 /* Only the nop tracer should hit this when disabling */
2b6080f2 1545 WARN_ON_ONCE(tr->current_trace != &nop_trace);
34600f0e 1546 return;
debdd57f 1547 }
34600f0e 1548
0b9b12c1 1549 arch_spin_lock(&tr->max_lock);
3928a8a2 1550
73c8d894
MH
1551 /* Inherit the recordable setting from trace_buffer */
1552 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1553 ring_buffer_record_on(tr->max_buffer.buffer);
1554 else
1555 ring_buffer_record_off(tr->max_buffer.buffer);
1556
a35873a0
TZ
1557#ifdef CONFIG_TRACER_SNAPSHOT
1558 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1559 goto out_unlock;
1560#endif
08ae88f8 1561 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
3928a8a2 1562
bc0c38d1 1563 __update_max_tr(tr, tsk, cpu);
a35873a0
TZ
1564
1565 out_unlock:
0b9b12c1 1566 arch_spin_unlock(&tr->max_lock);
bc0c38d1
SR
1567}
1568
1569/**
1570 * update_max_tr_single - only copy one trace over, and reset the rest
 1571 * @tr: tracer
 1572 * @tsk: task with the latency
 1573 * @cpu: the cpu of the buffer to copy.
4fcdae83
SR
1574 *
1575 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
bc0c38d1 1576 */
e309b41d 1577void
bc0c38d1
SR
1578update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1579{
3928a8a2 1580 int ret;
bc0c38d1 1581
2b6080f2 1582 if (tr->stop_count)
b8de7bd1
SR
1583 return;
1584
4c11d7ae 1585 WARN_ON_ONCE(!irqs_disabled());
6c24499f 1586 if (!tr->allocated_snapshot) {
2930e04d 1587 /* Only the nop tracer should hit this when disabling */
9e8529af 1588 WARN_ON_ONCE(tr->current_trace != &nop_trace);
ef710e10 1589 return;
2930e04d 1590 }
ef710e10 1591
0b9b12c1 1592 arch_spin_lock(&tr->max_lock);
bc0c38d1 1593
12883efb 1594 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
3928a8a2 1595
e8165dbb
SR
1596 if (ret == -EBUSY) {
1597 /*
1598 * We failed to swap the buffer due to a commit taking
1599 * place on this CPU. We fail to record, but we reset
1600 * the max trace buffer (no one writes directly to it)
1601 * and flag that it failed.
1602 */
12883efb 1603 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
e8165dbb
SR
1604 "Failed to swap buffers due to commit in progress\n");
1605 }
1606
e8165dbb 1607 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
bc0c38d1
SR
1608
1609 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1610 arch_spin_unlock(&tr->max_lock);
bc0c38d1 1611}
5d4a9dba 1612#endif /* CONFIG_TRACER_MAX_TRACE */
bc0c38d1 1613
2c2b0a78 1614static int wait_on_pipe(struct trace_iterator *iter, int full)
0d5c6e1c 1615{
15693458
SRRH
1616 /* Iterators are static, they should be filled or empty */
1617 if (trace_buffer_iter(iter, iter->cpu_file))
8b8b3683 1618 return 0;
0d5c6e1c 1619
e30f53aa
RV
1620 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1621 full);
0d5c6e1c
SR
1622}
1623
f4e781c0 1624#ifdef CONFIG_FTRACE_STARTUP_TEST
9afecfbb
SRV
1625static bool selftests_can_run;
1626
1627struct trace_selftests {
1628 struct list_head list;
1629 struct tracer *type;
1630};
1631
1632static LIST_HEAD(postponed_selftests);
1633
1634static int save_selftest(struct tracer *type)
1635{
1636 struct trace_selftests *selftest;
1637
1638 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1639 if (!selftest)
1640 return -ENOMEM;
1641
1642 selftest->type = type;
1643 list_add(&selftest->list, &postponed_selftests);
1644 return 0;
1645}
1646
f4e781c0
SRRH
1647static int run_tracer_selftest(struct tracer *type)
1648{
1649 struct trace_array *tr = &global_trace;
1650 struct tracer *saved_tracer = tr->current_trace;
1651 int ret;
0d5c6e1c 1652
f4e781c0
SRRH
1653 if (!type->selftest || tracing_selftest_disabled)
1654 return 0;
0d5c6e1c 1655
9afecfbb
SRV
1656 /*
1657 * If a tracer registers early in boot up (before scheduling is
1658 * initialized and such), then do not run its selftests yet.
1659 * Instead, run it a little later in the boot process.
1660 */
1661 if (!selftests_can_run)
1662 return save_selftest(type);
1663
0d5c6e1c 1664 /*
f4e781c0
SRRH
1665 * Run a selftest on this tracer.
1666 * Here we reset the trace buffer, and set the current
1667 * tracer to be this tracer. The tracer can then run some
1668 * internal tracing to verify that everything is in order.
1669 * If we fail, we do not register this tracer.
0d5c6e1c 1670 */
f4e781c0 1671 tracing_reset_online_cpus(&tr->trace_buffer);
0d5c6e1c 1672
f4e781c0
SRRH
1673 tr->current_trace = type;
1674
1675#ifdef CONFIG_TRACER_MAX_TRACE
1676 if (type->use_max_tr) {
1677 /* If we expanded the buffers, make sure the max is expanded too */
1678 if (ring_buffer_expanded)
1679 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1680 RING_BUFFER_ALL_CPUS);
1681 tr->allocated_snapshot = true;
1682 }
1683#endif
1684
1685 /* the test is responsible for initializing and enabling */
1686 pr_info("Testing tracer %s: ", type->name);
1687 ret = type->selftest(type, tr);
1688 /* the test is responsible for resetting too */
1689 tr->current_trace = saved_tracer;
1690 if (ret) {
1691 printk(KERN_CONT "FAILED!\n");
1692 /* Add the warning after printing 'FAILED' */
1693 WARN_ON(1);
1694 return -1;
1695 }
1696 /* Only reset on passing, to avoid touching corrupted buffers */
1697 tracing_reset_online_cpus(&tr->trace_buffer);
1698
1699#ifdef CONFIG_TRACER_MAX_TRACE
1700 if (type->use_max_tr) {
1701 tr->allocated_snapshot = false;
0d5c6e1c 1702
f4e781c0
SRRH
1703 /* Shrink the max buffer again */
1704 if (ring_buffer_expanded)
1705 ring_buffer_resize(tr->max_buffer.buffer, 1,
1706 RING_BUFFER_ALL_CPUS);
1707 }
1708#endif
1709
1710 printk(KERN_CONT "PASSED\n");
1711 return 0;
1712}
9afecfbb
SRV
1713
1714static __init int init_trace_selftests(void)
1715{
1716 struct trace_selftests *p, *n;
1717 struct tracer *t, **last;
1718 int ret;
1719
1720 selftests_can_run = true;
1721
1722 mutex_lock(&trace_types_lock);
1723
1724 if (list_empty(&postponed_selftests))
1725 goto out;
1726
1727 pr_info("Running postponed tracer tests:\n");
1728
1729 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
6fc2171c
AR
1730 /* This loop can take minutes when sanitizers are enabled, so
 1731 * let's make sure we allow RCU processing.
1732 */
1733 cond_resched();
9afecfbb
SRV
1734 ret = run_tracer_selftest(p->type);
1735 /* If the test fails, then warn and remove from available_tracers */
1736 if (ret < 0) {
1737 WARN(1, "tracer: %s failed selftest, disabling\n",
1738 p->type->name);
1739 last = &trace_types;
1740 for (t = trace_types; t; t = t->next) {
1741 if (t == p->type) {
1742 *last = t->next;
1743 break;
1744 }
1745 last = &t->next;
1746 }
1747 }
1748 list_del(&p->list);
1749 kfree(p);
1750 }
1751
1752 out:
1753 mutex_unlock(&trace_types_lock);
1754
1755 return 0;
1756}
b9ef0326 1757core_initcall(init_trace_selftests);
f4e781c0
SRRH
1758#else
1759static inline int run_tracer_selftest(struct tracer *type)
1760{
1761 return 0;
0d5c6e1c 1762}
f4e781c0 1763#endif /* CONFIG_FTRACE_STARTUP_TEST */
0d5c6e1c 1764
41d9c0be
SRRH
1765static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1766
a4d1e688
JW
1767static void __init apply_trace_boot_options(void);
1768
4fcdae83
SR
1769/**
1770 * register_tracer - register a tracer with the ftrace system.
 1771 * @type: the plugin for the tracer
1772 *
1773 * Register a new plugin tracer.
1774 */
a4d1e688 1775int __init register_tracer(struct tracer *type)
bc0c38d1
SR
1776{
1777 struct tracer *t;
bc0c38d1
SR
1778 int ret = 0;
1779
1780 if (!type->name) {
1781 pr_info("Tracer must have a name\n");
1782 return -1;
1783 }
1784
24a461d5 1785 if (strlen(type->name) >= MAX_TRACER_SIZE) {
ee6c2c1b
LZ
1786 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1787 return -1;
1788 }
1789
bc0c38d1 1790 mutex_lock(&trace_types_lock);
86fa2f60 1791
8e1b82e0
FW
1792 tracing_selftest_running = true;
1793
bc0c38d1
SR
1794 for (t = trace_types; t; t = t->next) {
1795 if (strcmp(type->name, t->name) == 0) {
1796 /* already found */
ee6c2c1b 1797 pr_info("Tracer %s already registered\n",
bc0c38d1
SR
1798 type->name);
1799 ret = -1;
1800 goto out;
1801 }
1802 }
1803
adf9f195
FW
1804 if (!type->set_flag)
1805 type->set_flag = &dummy_set_flag;
d39cdd20
CH
1806 if (!type->flags) {
 1807		/* allocate a dummy tracer_flags */
1808 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
c8ca003b
CH
1809 if (!type->flags) {
1810 ret = -ENOMEM;
1811 goto out;
1812 }
d39cdd20
CH
1813 type->flags->val = 0;
1814 type->flags->opts = dummy_tracer_opt;
1815 } else
adf9f195
FW
1816 if (!type->flags->opts)
1817 type->flags->opts = dummy_tracer_opt;
6eaaa5d5 1818
d39cdd20
CH
1819 /* store the tracer for __set_tracer_option */
1820 type->flags->trace = type;
1821
f4e781c0
SRRH
1822 ret = run_tracer_selftest(type);
1823 if (ret < 0)
1824 goto out;
60a11774 1825
bc0c38d1
SR
1826 type->next = trace_types;
1827 trace_types = type;
41d9c0be 1828 add_tracer_options(&global_trace, type);
60a11774 1829
bc0c38d1 1830 out:
8e1b82e0 1831 tracing_selftest_running = false;
bc0c38d1
SR
1832 mutex_unlock(&trace_types_lock);
1833
dac74940
SR
1834 if (ret || !default_bootup_tracer)
1835 goto out_unlock;
1836
ee6c2c1b 1837 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
dac74940
SR
1838 goto out_unlock;
1839
1840 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1841 /* Do we want this tracer to start on bootup? */
607e2ea1 1842 tracing_set_tracer(&global_trace, type->name);
dac74940 1843 default_bootup_tracer = NULL;
a4d1e688
JW
1844
1845 apply_trace_boot_options();
1846
dac74940 1847 /* disable other selftests, since this will break it. */
55034cd6 1848 tracing_selftest_disabled = true;
b2821ae6 1849#ifdef CONFIG_FTRACE_STARTUP_TEST
dac74940
SR
1850 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1851 type->name);
b2821ae6 1852#endif
b2821ae6 1853
dac74940 1854 out_unlock:
bc0c38d1
SR
1855 return ret;
1856}
1857
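/*
 * Illustrative sketch of a tracer plugin using register_tracer(), modeled
 * on the minimal in-tree tracers such as the nop tracer.  The names
 * example_tracer, example_tracer_init() and example_tracer_reset() are
 * hypothetical.  Since register_tracer() is __init in this file, the
 * registration has to happen from built-in boot code, not from a module.
 */
static int example_tracer_init(struct trace_array *tr)
{
	/* start from a clean per-CPU ring buffer */
	tracing_reset_online_cpus(&tr->trace_buffer);
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
	/* nothing to tear down for this sketch */
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(example_tracer_register);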
12883efb 1858void tracing_reset(struct trace_buffer *buf, int cpu)
f633903a 1859{
12883efb 1860 struct ring_buffer *buffer = buf->buffer;
f633903a 1861
a5416411
HT
1862 if (!buffer)
1863 return;
1864
f633903a
SR
1865 ring_buffer_record_disable(buffer);
1866
1867 /* Make sure all commits have finished */
74401729 1868 synchronize_rcu();
68179686 1869 ring_buffer_reset_cpu(buffer, cpu);
f633903a
SR
1870
1871 ring_buffer_record_enable(buffer);
1872}
1873
12883efb 1874void tracing_reset_online_cpus(struct trace_buffer *buf)
213cc060 1875{
12883efb 1876 struct ring_buffer *buffer = buf->buffer;
213cc060
PE
1877 int cpu;
1878
a5416411
HT
1879 if (!buffer)
1880 return;
1881
621968cd
SR
1882 ring_buffer_record_disable(buffer);
1883
1884 /* Make sure all commits have finished */
74401729 1885 synchronize_rcu();
621968cd 1886
9457158b 1887 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
213cc060
PE
1888
1889 for_each_online_cpu(cpu)
68179686 1890 ring_buffer_reset_cpu(buffer, cpu);
621968cd
SR
1891
1892 ring_buffer_record_enable(buffer);
213cc060
PE
1893}
1894
09d8091c 1895/* Must have trace_types_lock held */
873c642f 1896void tracing_reset_all_online_cpus(void)
9456f0fa 1897{
873c642f
SRRH
1898 struct trace_array *tr;
1899
873c642f 1900 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
065e63f9
SRV
1901 if (!tr->clear_trace)
1902 continue;
1903 tr->clear_trace = false;
12883efb
SRRH
1904 tracing_reset_online_cpus(&tr->trace_buffer);
1905#ifdef CONFIG_TRACER_MAX_TRACE
1906 tracing_reset_online_cpus(&tr->max_buffer);
1907#endif
873c642f 1908 }
9456f0fa
SR
1909}
1910
d914ba37
JF
1911static int *tgid_map;
1912
939c7a4f 1913#define SAVED_CMDLINES_DEFAULT 128
2c7eea4c 1914#define NO_CMDLINE_MAP UINT_MAX
edc35bd7 1915static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
939c7a4f
YY
1916struct saved_cmdlines_buffer {
1917 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1918 unsigned *map_cmdline_to_pid;
1919 unsigned cmdline_num;
1920 int cmdline_idx;
1921 char *saved_cmdlines;
1922};
1923static struct saved_cmdlines_buffer *savedcmd;
25b0b44a 1924
25b0b44a 1925/* temporary disable recording */
d914ba37 1926static atomic_t trace_record_taskinfo_disabled __read_mostly;
bc0c38d1 1927
939c7a4f
YY
1928static inline char *get_saved_cmdlines(int idx)
1929{
1930 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1931}
1932
1933static inline void set_cmdline(int idx, const char *cmdline)
bc0c38d1 1934{
85f726a3 1935 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
939c7a4f
YY
1936}
1937
1938static int allocate_cmdlines_buffer(unsigned int val,
1939 struct saved_cmdlines_buffer *s)
1940{
6da2ec56
KC
1941 s->map_cmdline_to_pid = kmalloc_array(val,
1942 sizeof(*s->map_cmdline_to_pid),
1943 GFP_KERNEL);
939c7a4f
YY
1944 if (!s->map_cmdline_to_pid)
1945 return -ENOMEM;
1946
6da2ec56 1947 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
939c7a4f
YY
1948 if (!s->saved_cmdlines) {
1949 kfree(s->map_cmdline_to_pid);
1950 return -ENOMEM;
1951 }
1952
1953 s->cmdline_idx = 0;
1954 s->cmdline_num = val;
1955 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1956 sizeof(s->map_pid_to_cmdline));
1957 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1958 val * sizeof(*s->map_cmdline_to_pid));
1959
1960 return 0;
1961}
1962
1963static int trace_create_savedcmd(void)
1964{
1965 int ret;
1966
a6af8fbf 1967 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
939c7a4f
YY
1968 if (!savedcmd)
1969 return -ENOMEM;
1970
1971 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1972 if (ret < 0) {
1973 kfree(savedcmd);
1974 savedcmd = NULL;
1975 return -ENOMEM;
1976 }
1977
1978 return 0;
bc0c38d1
SR
1979}
1980
b5130b1e
CE
1981int is_tracing_stopped(void)
1982{
2b6080f2 1983 return global_trace.stop_count;
b5130b1e
CE
1984}
1985
0f048701
SR
1986/**
1987 * tracing_start - quick start of the tracer
1988 *
1989 * If tracing is enabled but was stopped by tracing_stop,
1990 * this will start the tracer back up.
1991 */
1992void tracing_start(void)
1993{
1994 struct ring_buffer *buffer;
1995 unsigned long flags;
1996
1997 if (tracing_disabled)
1998 return;
1999
2b6080f2
SR
2000 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2001 if (--global_trace.stop_count) {
2002 if (global_trace.stop_count < 0) {
b06a8301
SR
2003 /* Someone screwed up their debugging */
2004 WARN_ON_ONCE(1);
2b6080f2 2005 global_trace.stop_count = 0;
b06a8301 2006 }
0f048701
SR
2007 goto out;
2008 }
2009
a2f80714 2010 /* Prevent the buffers from switching */
0b9b12c1 2011 arch_spin_lock(&global_trace.max_lock);
0f048701 2012
12883efb 2013 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
2014 if (buffer)
2015 ring_buffer_record_enable(buffer);
2016
12883efb
SRRH
2017#ifdef CONFIG_TRACER_MAX_TRACE
2018 buffer = global_trace.max_buffer.buffer;
0f048701
SR
2019 if (buffer)
2020 ring_buffer_record_enable(buffer);
12883efb 2021#endif
0f048701 2022
0b9b12c1 2023 arch_spin_unlock(&global_trace.max_lock);
a2f80714 2024
0f048701 2025 out:
2b6080f2
SR
2026 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2027}
2028
2029static void tracing_start_tr(struct trace_array *tr)
2030{
2031 struct ring_buffer *buffer;
2032 unsigned long flags;
2033
2034 if (tracing_disabled)
2035 return;
2036
2037 /* If global, we need to also start the max tracer */
2038 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2039 return tracing_start();
2040
2041 raw_spin_lock_irqsave(&tr->start_lock, flags);
2042
2043 if (--tr->stop_count) {
2044 if (tr->stop_count < 0) {
2045 /* Someone screwed up their debugging */
2046 WARN_ON_ONCE(1);
2047 tr->stop_count = 0;
2048 }
2049 goto out;
2050 }
2051
12883efb 2052 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
2053 if (buffer)
2054 ring_buffer_record_enable(buffer);
2055
2056 out:
2057 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
2058}
2059
2060/**
2061 * tracing_stop - quick stop of the tracer
2062 *
 2063 * Lightweight way to stop tracing. Use in conjunction with
2064 * tracing_start.
2065 */
2066void tracing_stop(void)
2067{
2068 struct ring_buffer *buffer;
2069 unsigned long flags;
2070
2b6080f2
SR
2071 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2072 if (global_trace.stop_count++)
0f048701
SR
2073 goto out;
2074
a2f80714 2075 /* Prevent the buffers from switching */
0b9b12c1 2076 arch_spin_lock(&global_trace.max_lock);
a2f80714 2077
12883efb 2078 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
2079 if (buffer)
2080 ring_buffer_record_disable(buffer);
2081
12883efb
SRRH
2082#ifdef CONFIG_TRACER_MAX_TRACE
2083 buffer = global_trace.max_buffer.buffer;
0f048701
SR
2084 if (buffer)
2085 ring_buffer_record_disable(buffer);
12883efb 2086#endif
0f048701 2087
0b9b12c1 2088 arch_spin_unlock(&global_trace.max_lock);
a2f80714 2089
0f048701 2090 out:
2b6080f2
SR
2091 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2092}
2093
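/*
 * Illustrative sketch of the intended pairing of tracing_stop() and
 * tracing_start(): both operate on a nesting count, so recording resumes
 * only when the final tracing_start() balances the first tracing_stop().
 * The wrapper function below is hypothetical.
 */
static void example_untraced_section(void)
{
	tracing_stop();		/* ring buffer recording disabled */

	/* ... work that must not appear in the trace ... */

	tracing_start();	/* recording resumes once the count hits zero */
}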
2094static void tracing_stop_tr(struct trace_array *tr)
2095{
2096 struct ring_buffer *buffer;
2097 unsigned long flags;
2098
2099 /* If global, we need to also stop the max tracer */
2100 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2101 return tracing_stop();
2102
2103 raw_spin_lock_irqsave(&tr->start_lock, flags);
2104 if (tr->stop_count++)
2105 goto out;
2106
12883efb 2107 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
2108 if (buffer)
2109 ring_buffer_record_disable(buffer);
2110
2111 out:
2112 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
2113}
2114
379cfdac 2115static int trace_save_cmdline(struct task_struct *tsk)
bc0c38d1 2116{
a635cf04 2117 unsigned pid, idx;
bc0c38d1 2118
eaf260ac
JF
2119 /* treat recording of idle task as a success */
2120 if (!tsk->pid)
2121 return 1;
2122
2123 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
379cfdac 2124 return 0;
bc0c38d1
SR
2125
2126 /*
2127 * It's not the end of the world if we don't get
2128 * the lock, but we also don't want to spin
2129 * nor do we want to disable interrupts,
2130 * so if we miss here, then better luck next time.
2131 */
0199c4e6 2132 if (!arch_spin_trylock(&trace_cmdline_lock))
379cfdac 2133 return 0;
bc0c38d1 2134
939c7a4f 2135 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2c7eea4c 2136 if (idx == NO_CMDLINE_MAP) {
939c7a4f 2137 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
bc0c38d1 2138
a635cf04
CE
2139 /*
2140 * Check whether the cmdline buffer at idx has a pid
2141 * mapped. We are going to overwrite that entry so we
2142 * need to clear the map_pid_to_cmdline. Otherwise we
2143 * would read the new comm for the old pid.
2144 */
939c7a4f 2145 pid = savedcmd->map_cmdline_to_pid[idx];
a635cf04 2146 if (pid != NO_CMDLINE_MAP)
939c7a4f 2147 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
bc0c38d1 2148
939c7a4f
YY
2149 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2150 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
bc0c38d1 2151
939c7a4f 2152 savedcmd->cmdline_idx = idx;
bc0c38d1
SR
2153 }
2154
939c7a4f 2155 set_cmdline(idx, tsk->comm);
bc0c38d1 2156
0199c4e6 2157 arch_spin_unlock(&trace_cmdline_lock);
379cfdac
SRRH
2158
2159 return 1;
bc0c38d1
SR
2160}
2161
4c27e756 2162static void __trace_find_cmdline(int pid, char comm[])
bc0c38d1 2163{
bc0c38d1
SR
2164 unsigned map;
2165
4ca53085
SR
2166 if (!pid) {
2167 strcpy(comm, "<idle>");
2168 return;
2169 }
bc0c38d1 2170
74bf4076
SR
2171 if (WARN_ON_ONCE(pid < 0)) {
2172 strcpy(comm, "<XXX>");
2173 return;
2174 }
2175
4ca53085
SR
2176 if (pid > PID_MAX_DEFAULT) {
2177 strcpy(comm, "<...>");
2178 return;
2179 }
bc0c38d1 2180
939c7a4f 2181 map = savedcmd->map_pid_to_cmdline[pid];
50d88758 2182 if (map != NO_CMDLINE_MAP)
e09e2867 2183 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
50d88758
TG
2184 else
2185 strcpy(comm, "<...>");
4c27e756
SRRH
2186}
2187
2188void trace_find_cmdline(int pid, char comm[])
2189{
2190 preempt_disable();
2191 arch_spin_lock(&trace_cmdline_lock);
2192
2193 __trace_find_cmdline(pid, comm);
bc0c38d1 2194
0199c4e6 2195 arch_spin_unlock(&trace_cmdline_lock);
5b6045a9 2196 preempt_enable();
bc0c38d1
SR
2197}
2198
d914ba37
JF
2199int trace_find_tgid(int pid)
2200{
2201 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2202 return 0;
2203
2204 return tgid_map[pid];
2205}
2206
2207static int trace_save_tgid(struct task_struct *tsk)
2208{
bd45d34d
JF
2209 /* treat recording of idle task as a success */
2210 if (!tsk->pid)
2211 return 1;
2212
2213 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
d914ba37
JF
2214 return 0;
2215
2216 tgid_map[tsk->pid] = tsk->tgid;
2217 return 1;
2218}
2219
2220static bool tracing_record_taskinfo_skip(int flags)
2221{
2222 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2223 return true;
2224 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2225 return true;
2226 if (!__this_cpu_read(trace_taskinfo_save))
2227 return true;
2228 return false;
2229}
2230
2231/**
2232 * tracing_record_taskinfo - record the task info of a task
2233 *
 2234 * @task: task to record
 2235 * @flags: TRACE_RECORD_CMDLINE for recording comm
 2236 *         TRACE_RECORD_TGID for recording tgid
2237 */
2238void tracing_record_taskinfo(struct task_struct *task, int flags)
2239{
29b1a8ad
JF
2240 bool done;
2241
d914ba37
JF
2242 if (tracing_record_taskinfo_skip(flags))
2243 return;
29b1a8ad
JF
2244
2245 /*
2246 * Record as much task information as possible. If some fail, continue
2247 * to try to record the others.
2248 */
2249 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2250 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2251
2252 /* If recording any information failed, retry again soon. */
2253 if (!done)
d914ba37
JF
2254 return;
2255
2256 __this_cpu_write(trace_taskinfo_save, false);
2257}
2258
2259/**
2260 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2261 *
 2262 * @prev: previous task during sched_switch
 2263 * @next: next task during sched_switch
 2264 * @flags: TRACE_RECORD_CMDLINE for recording comm
 2265 *         TRACE_RECORD_TGID for recording tgid
2266 */
2267void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2268 struct task_struct *next, int flags)
bc0c38d1 2269{
29b1a8ad
JF
2270 bool done;
2271
d914ba37
JF
2272 if (tracing_record_taskinfo_skip(flags))
2273 return;
2274
29b1a8ad
JF
2275 /*
2276 * Record as much task information as possible. If some fail, continue
2277 * to try to record the others.
2278 */
2279 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2280 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2281 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2282 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
bc0c38d1 2283
29b1a8ad
JF
2284 /* If recording any information failed, retry again soon. */
2285 if (!done)
7ffbd48d
SR
2286 return;
2287
d914ba37
JF
2288 __this_cpu_write(trace_taskinfo_save, false);
2289}
2290
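/*
 * Illustrative sketch of the expected caller of
 * tracing_record_taskinfo_sched_switch(): a sched_switch tracepoint probe
 * records comm and tgid for both the outgoing and incoming task in one
 * call.  The probe name is hypothetical; the real caller lives in
 * trace_sched_switch.c.
 */
static void example_sched_switch_probe(void *data, bool preempt,
				       struct task_struct *prev,
				       struct task_struct *next)
{
	int flags = TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID;

	tracing_record_taskinfo_sched_switch(prev, next, flags);
}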
2291/* Helpers to record a specific task information */
2292void tracing_record_cmdline(struct task_struct *task)
2293{
2294 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2295}
2296
2297void tracing_record_tgid(struct task_struct *task)
2298{
2299 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
bc0c38d1
SR
2300}
2301
af0009fc
SRV
2302/*
2303 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2304 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2305 * simplifies those functions and keeps them in sync.
2306 */
2307enum print_line_t trace_handle_return(struct trace_seq *s)
2308{
2309 return trace_seq_has_overflowed(s) ?
2310 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2311}
2312EXPORT_SYMBOL_GPL(trace_handle_return);
2313
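/*
 * Illustrative sketch of the pattern trace_handle_return() exists for: an
 * event's output callback writes into the iterator's trace_seq and lets
 * the helper map a possible overflow to TRACE_TYPE_PARTIAL_LINE.  The
 * event structure and function name below are hypothetical.
 */
static enum print_line_t
example_event_print(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct example_entry {
		struct trace_entry	ent;
		unsigned long		value;
	} *field = (void *)iter->ent;

	trace_seq_printf(s, "example: %lu\n", field->value);

	return trace_handle_return(s);
}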
45dcd8b8 2314void
38697053
SR
2315tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2316 int pc)
bc0c38d1
SR
2317{
2318 struct task_struct *tsk = current;
bc0c38d1 2319
777e208d
SR
2320 entry->preempt_count = pc & 0xff;
2321 entry->pid = (tsk) ? tsk->pid : 0;
2322 entry->flags =
9244489a 2323#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2e2ca155 2324 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
9244489a
SR
2325#else
2326 TRACE_FLAG_IRQS_NOSUPPORT |
2327#endif
7e6867bf 2328 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
bc0c38d1 2329 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
c59f29cb 2330 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
e5137b50
PZ
2331 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2332 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
bc0c38d1 2333}
f413cdb8 2334EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
bc0c38d1 2335
e77405ad
SR
2336struct ring_buffer_event *
2337trace_buffer_lock_reserve(struct ring_buffer *buffer,
2338 int type,
2339 unsigned long len,
2340 unsigned long flags, int pc)
51a763dd 2341{
3e9a8aad 2342 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
0fc1b09f
SRRH
2343}
2344
2345DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2346DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2347static int trace_buffered_event_ref;
2348
2349/**
2350 * trace_buffered_event_enable - enable buffering events
2351 *
2352 * When events are being filtered, it is quicker to use a temporary
2353 * buffer to write the event data into if there's a likely chance
2354 * that it will not be committed. The discard of the ring buffer
2355 * is not as fast as committing, and is much slower than copying
2356 * a commit.
2357 *
2358 * When an event is to be filtered, allocate per cpu buffers to
2359 * write the event data into, and if the event is filtered and discarded
2360 * it is simply dropped, otherwise, the entire data is to be committed
2361 * in one shot.
2362 */
2363void trace_buffered_event_enable(void)
2364{
2365 struct ring_buffer_event *event;
2366 struct page *page;
2367 int cpu;
51a763dd 2368
0fc1b09f
SRRH
2369 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2370
2371 if (trace_buffered_event_ref++)
2372 return;
2373
2374 for_each_tracing_cpu(cpu) {
2375 page = alloc_pages_node(cpu_to_node(cpu),
2376 GFP_KERNEL | __GFP_NORETRY, 0);
2377 if (!page)
2378 goto failed;
2379
2380 event = page_address(page);
2381 memset(event, 0, sizeof(*event));
2382
2383 per_cpu(trace_buffered_event, cpu) = event;
2384
2385 preempt_disable();
2386 if (cpu == smp_processor_id() &&
2387 this_cpu_read(trace_buffered_event) !=
2388 per_cpu(trace_buffered_event, cpu))
2389 WARN_ON_ONCE(1);
2390 preempt_enable();
51a763dd
ACM
2391 }
2392
0fc1b09f
SRRH
2393 return;
2394 failed:
2395 trace_buffered_event_disable();
2396}
2397
2398static void enable_trace_buffered_event(void *data)
2399{
2400 /* Probably not needed, but do it anyway */
2401 smp_rmb();
2402 this_cpu_dec(trace_buffered_event_cnt);
2403}
2404
2405static void disable_trace_buffered_event(void *data)
2406{
2407 this_cpu_inc(trace_buffered_event_cnt);
2408}
2409
2410/**
2411 * trace_buffered_event_disable - disable buffering events
2412 *
2413 * When a filter is removed, it is faster to not use the buffered
2414 * events, and to commit directly into the ring buffer. Free up
2415 * the temp buffers when there are no more users. This requires
2416 * special synchronization with current events.
2417 */
2418void trace_buffered_event_disable(void)
2419{
2420 int cpu;
2421
2422 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2423
2424 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2425 return;
2426
2427 if (--trace_buffered_event_ref)
2428 return;
2429
2430 preempt_disable();
2431 /* For each CPU, set the buffer as used. */
2432 smp_call_function_many(tracing_buffer_mask,
2433 disable_trace_buffered_event, NULL, 1);
2434 preempt_enable();
2435
2436 /* Wait for all current users to finish */
74401729 2437 synchronize_rcu();
0fc1b09f
SRRH
2438
2439 for_each_tracing_cpu(cpu) {
2440 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2441 per_cpu(trace_buffered_event, cpu) = NULL;
2442 }
2443 /*
2444 * Make sure trace_buffered_event is NULL before clearing
2445 * trace_buffered_event_cnt.
2446 */
2447 smp_wmb();
2448
2449 preempt_disable();
2450 /* Do the work on each cpu */
2451 smp_call_function_many(tracing_buffer_mask,
2452 enable_trace_buffered_event, NULL, 1);
2453 preempt_enable();
51a763dd 2454}
51a763dd 2455
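/*
 * Illustrative sketch of how the reference-counted buffered-event path is
 * toggled.  Both calls require event_mutex, and in practice they are made
 * from the event filter code when a filter is attached to or removed from
 * an event file.  The wrapper below is hypothetical.
 */
static void example_toggle_buffered_events(bool filtering)
{
	mutex_lock(&event_mutex);

	if (filtering)
		trace_buffered_event_enable();
	else
		trace_buffered_event_disable();

	mutex_unlock(&event_mutex);
}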
2c4a33ab
SRRH
2456static struct ring_buffer *temp_buffer;
2457
ccb469a1
SR
2458struct ring_buffer_event *
2459trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
7f1d2f82 2460 struct trace_event_file *trace_file,
ccb469a1
SR
2461 int type, unsigned long len,
2462 unsigned long flags, int pc)
2463{
2c4a33ab 2464 struct ring_buffer_event *entry;
0fc1b09f 2465 int val;
2c4a33ab 2466
7f1d2f82 2467 *current_rb = trace_file->tr->trace_buffer.buffer;
0fc1b09f 2468
00b41452 2469 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
0fc1b09f
SRRH
2470 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2471 (entry = this_cpu_read(trace_buffered_event))) {
2472 /* Try to use the per cpu buffer first */
2473 val = this_cpu_inc_return(trace_buffered_event_cnt);
2474 if (val == 1) {
2475 trace_event_setup(entry, type, flags, pc);
2476 entry->array[0] = len;
2477 return entry;
2478 }
2479 this_cpu_dec(trace_buffered_event_cnt);
2480 }
2481
3e9a8aad
SRRH
2482 entry = __trace_buffer_lock_reserve(*current_rb,
2483 type, len, flags, pc);
2c4a33ab
SRRH
2484 /*
2485 * If tracing is off, but we have triggers enabled
2486 * we still need to look at the event data. Use the temp_buffer
 2487 * to store the trace event for the trigger to use. It's recursion
2488 * safe and will not be recorded anywhere.
2489 */
5d6ad960 2490 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2c4a33ab 2491 *current_rb = temp_buffer;
3e9a8aad
SRRH
2492 entry = __trace_buffer_lock_reserve(*current_rb,
2493 type, len, flags, pc);
2c4a33ab
SRRH
2494 }
2495 return entry;
ccb469a1
SR
2496}
2497EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2498
42391745
SRRH
2499static DEFINE_SPINLOCK(tracepoint_iter_lock);
2500static DEFINE_MUTEX(tracepoint_printk_mutex);
2501
2502static void output_printk(struct trace_event_buffer *fbuffer)
2503{
2504 struct trace_event_call *event_call;
2505 struct trace_event *event;
2506 unsigned long flags;
2507 struct trace_iterator *iter = tracepoint_print_iter;
2508
2509 /* We should never get here if iter is NULL */
2510 if (WARN_ON_ONCE(!iter))
2511 return;
2512
2513 event_call = fbuffer->trace_file->event_call;
2514 if (!event_call || !event_call->event.funcs ||
2515 !event_call->event.funcs->trace)
2516 return;
2517
2518 event = &fbuffer->trace_file->event_call->event;
2519
2520 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2521 trace_seq_init(&iter->seq);
2522 iter->ent = fbuffer->entry;
2523 event_call->event.funcs->trace(iter, 0, event);
2524 trace_seq_putc(&iter->seq, 0);
2525 printk("%s", iter->seq.buffer);
2526
2527 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2528}
2529
2530int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2531 void __user *buffer, size_t *lenp,
2532 loff_t *ppos)
2533{
2534 int save_tracepoint_printk;
2535 int ret;
2536
2537 mutex_lock(&tracepoint_printk_mutex);
2538 save_tracepoint_printk = tracepoint_printk;
2539
2540 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2541
2542 /*
2543 * This will force exiting early, as tracepoint_printk
 2544 * is always zero when tracepoint_print_iter is not allocated
2545 */
2546 if (!tracepoint_print_iter)
2547 tracepoint_printk = 0;
2548
2549 if (save_tracepoint_printk == tracepoint_printk)
2550 goto out;
2551
2552 if (tracepoint_printk)
2553 static_key_enable(&tracepoint_printk_key.key);
2554 else
2555 static_key_disable(&tracepoint_printk_key.key);
2556
2557 out:
2558 mutex_unlock(&tracepoint_printk_mutex);
2559
2560 return ret;
2561}
2562
2563void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2564{
2565 if (static_key_false(&tracepoint_printk_key.key))
2566 output_printk(fbuffer);
2567
2568 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2569 fbuffer->event, fbuffer->entry,
2570 fbuffer->flags, fbuffer->pc);
2571}
2572EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2573
2ee5b92a
SRV
2574/*
2575 * Skip 3:
2576 *
2577 * trace_buffer_unlock_commit_regs()
2578 * trace_event_buffer_commit()
2579 * trace_event_raw_event_xxx()
13cf912b 2580 */
2ee5b92a
SRV
2581# define STACK_SKIP 3
2582
b7f0c959
SRRH
2583void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2584 struct ring_buffer *buffer,
0d5c6e1c
SR
2585 struct ring_buffer_event *event,
2586 unsigned long flags, int pc,
2587 struct pt_regs *regs)
1fd8df2c 2588{
7ffbd48d 2589 __buffer_unlock_commit(buffer, event);
1fd8df2c 2590
be54f69c 2591 /*
2ee5b92a 2592 * If regs is not set, then skip the necessary functions.
be54f69c
SRRH
2593 * Note, we can still get here via blktrace, wakeup tracer
2594 * and mmiotrace, but that's ok if they lose a function or
2ee5b92a 2595 * two. They are not that meaningful.
be54f69c 2596 */
2ee5b92a 2597 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
1fd8df2c
MH
2598 ftrace_trace_userstack(buffer, flags, pc);
2599}
1fd8df2c 2600
52ffabe3
SRRH
2601/*
2602 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2603 */
2604void
2605trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2606 struct ring_buffer_event *event)
2607{
2608 __buffer_unlock_commit(buffer, event);
2609}
2610
478409dd
CZ
2611static void
2612trace_process_export(struct trace_export *export,
2613 struct ring_buffer_event *event)
2614{
2615 struct trace_entry *entry;
2616 unsigned int size = 0;
2617
2618 entry = ring_buffer_event_data(event);
2619 size = ring_buffer_event_length(event);
a773d419 2620 export->write(export, entry, size);
478409dd
CZ
2621}
2622
2623static DEFINE_MUTEX(ftrace_export_lock);
2624
2625static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2626
2627static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2628
2629static inline void ftrace_exports_enable(void)
2630{
2631 static_branch_enable(&ftrace_exports_enabled);
2632}
2633
2634static inline void ftrace_exports_disable(void)
2635{
2636 static_branch_disable(&ftrace_exports_enabled);
2637}
2638
1cce377d 2639static void ftrace_exports(struct ring_buffer_event *event)
478409dd
CZ
2640{
2641 struct trace_export *export;
2642
2643 preempt_disable_notrace();
2644
2645 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2646 while (export) {
2647 trace_process_export(export, event);
2648 export = rcu_dereference_raw_notrace(export->next);
2649 }
2650
2651 preempt_enable_notrace();
2652}
2653
2654static inline void
2655add_trace_export(struct trace_export **list, struct trace_export *export)
2656{
2657 rcu_assign_pointer(export->next, *list);
2658 /*
2659 * We are entering export into the list but another
2660 * CPU might be walking that list. We need to make sure
2661 * the export->next pointer is valid before another CPU sees
 2662 * the export pointer inserted into the list.
2663 */
2664 rcu_assign_pointer(*list, export);
2665}
2666
2667static inline int
2668rm_trace_export(struct trace_export **list, struct trace_export *export)
2669{
2670 struct trace_export **p;
2671
2672 for (p = list; *p != NULL; p = &(*p)->next)
2673 if (*p == export)
2674 break;
2675
2676 if (*p != export)
2677 return -1;
2678
2679 rcu_assign_pointer(*p, (*p)->next);
2680
2681 return 0;
2682}
2683
2684static inline void
2685add_ftrace_export(struct trace_export **list, struct trace_export *export)
2686{
2687 if (*list == NULL)
2688 ftrace_exports_enable();
2689
2690 add_trace_export(list, export);
2691}
2692
2693static inline int
2694rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2695{
2696 int ret;
2697
2698 ret = rm_trace_export(list, export);
2699 if (*list == NULL)
2700 ftrace_exports_disable();
2701
2702 return ret;
2703}
2704
2705int register_ftrace_export(struct trace_export *export)
2706{
2707 if (WARN_ON_ONCE(!export->write))
2708 return -1;
2709
2710 mutex_lock(&ftrace_export_lock);
2711
2712 add_ftrace_export(&ftrace_exports_list, export);
2713
2714 mutex_unlock(&ftrace_export_lock);
2715
2716 return 0;
2717}
2718EXPORT_SYMBOL_GPL(register_ftrace_export);
2719
2720int unregister_ftrace_export(struct trace_export *export)
2721{
2722 int ret;
2723
2724 mutex_lock(&ftrace_export_lock);
2725
2726 ret = rm_ftrace_export(&ftrace_exports_list, export);
2727
2728 mutex_unlock(&ftrace_export_lock);
2729
2730 return ret;
2731}
2732EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2733
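/*
 * Illustrative sketch of a trace export consumer: once registered, every
 * function trace entry is handed to the export's ->write() callback with
 * the raw entry and its length.  The callback and export names are
 * hypothetical; an in-tree user of this interface is the STM ftrace
 * export.
 */
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* forward the raw trace entry to an out-of-band channel */
}

static struct trace_export example_export = {
	.write	= example_export_write,
};

/*
 * register_ftrace_export(&example_export) starts the flow of entries and
 * unregister_ftrace_export(&example_export) stops it again.
 */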
e309b41d 2734void
7be42151 2735trace_function(struct trace_array *tr,
38697053
SR
2736 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2737 int pc)
bc0c38d1 2738{
2425bcb9 2739 struct trace_event_call *call = &event_function;
12883efb 2740 struct ring_buffer *buffer = tr->trace_buffer.buffer;
3928a8a2 2741 struct ring_buffer_event *event;
777e208d 2742 struct ftrace_entry *entry;
bc0c38d1 2743
3e9a8aad
SRRH
2744 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2745 flags, pc);
3928a8a2
SR
2746 if (!event)
2747 return;
2748 entry = ring_buffer_event_data(event);
777e208d
SR
2749 entry->ip = ip;
2750 entry->parent_ip = parent_ip;
e1112b4d 2751
478409dd
CZ
2752 if (!call_filter_check_discard(call, entry, buffer, event)) {
2753 if (static_branch_unlikely(&ftrace_exports_enabled))
2754 ftrace_exports(event);
7ffbd48d 2755 __buffer_unlock_commit(buffer, event);
478409dd 2756 }
bc0c38d1
SR
2757}
2758
c0a0d0d3 2759#ifdef CONFIG_STACKTRACE
4a9bd3f1 2760
2a820bf7
TG
2761/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2762#define FTRACE_KSTACK_NESTING 4
2763
2764#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2765
4a9bd3f1 2766struct ftrace_stack {
2a820bf7 2767 unsigned long calls[FTRACE_KSTACK_ENTRIES];
4a9bd3f1
SR
2768};
2769
2a820bf7
TG
2770
2771struct ftrace_stacks {
2772 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
4a9bd3f1
SR
2773};
2774
2a820bf7 2775static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
4a9bd3f1
SR
2776static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2777
e77405ad 2778static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 2779 unsigned long flags,
1fd8df2c 2780 int skip, int pc, struct pt_regs *regs)
86387f7e 2781{
2425bcb9 2782 struct trace_event_call *call = &event_kernel_stack;
3928a8a2 2783 struct ring_buffer_event *event;
ee6dd0db 2784 unsigned int size, nr_entries;
2a820bf7 2785 struct ftrace_stack *fstack;
777e208d 2786 struct stack_entry *entry;
2a820bf7 2787 int stackidx;
4a9bd3f1 2788
be54f69c 2789 /*
2ee5b92a 2790 * Add one, for this function and the call to save_stack_trace()
be54f69c
SRRH
2791 * If regs is set, then these functions will not be in the way.
2792 */
2ee5b92a 2793#ifndef CONFIG_UNWINDER_ORC
be54f69c 2794 if (!regs)
ee6dd0db 2795 skip++;
2ee5b92a 2796#endif
be54f69c 2797
4a9bd3f1
SR
2798 /*
 2799 * Since events can happen in NMIs there's no safe way to
 2800 * use the per cpu ftrace_stacks without reserving a slot. Each
 2801 * context (normal, softirq, irq, NMI) gets its own nesting
 2802 * level, up to FTRACE_KSTACK_NESTING deep.
2803 */
2804 preempt_disable_notrace();
2805
2a820bf7
TG
2806 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2807
2808 /* This should never happen. If it does, yell once and skip */
 2809 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2810 goto out;
2811
4a9bd3f1 2812 /*
2a820bf7
TG
2813 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2814 * interrupt will either see the value pre increment or post
2815 * increment. If the interrupt happens pre increment it will have
2816 * restored the counter when it returns. We just need a barrier to
2817 * keep gcc from moving things around.
4a9bd3f1
SR
2818 */
2819 barrier();
4a9bd3f1 2820
2a820bf7 2821 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
ee6dd0db 2822 size = ARRAY_SIZE(fstack->calls);
4a9bd3f1 2823
ee6dd0db
TG
2824 if (regs) {
2825 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2826 size, skip);
2827 } else {
2828 nr_entries = stack_trace_save(fstack->calls, size, skip);
2829 }
86387f7e 2830
ee6dd0db 2831 size = nr_entries * sizeof(unsigned long);
3e9a8aad
SRRH
2832 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2833 sizeof(*entry) + size, flags, pc);
3928a8a2 2834 if (!event)
4a9bd3f1
SR
2835 goto out;
2836 entry = ring_buffer_event_data(event);
86387f7e 2837
ee6dd0db
TG
2838 memcpy(&entry->caller, fstack->calls, size);
2839 entry->size = nr_entries;
86387f7e 2840
f306cc82 2841 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2842 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
2843
2844 out:
2845 /* Again, don't let gcc optimize things here */
2846 barrier();
82146529 2847 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
2848 preempt_enable_notrace();
2849
f0a920d5
IM
2850}
2851
2d34f489
SRRH
2852static inline void ftrace_trace_stack(struct trace_array *tr,
2853 struct ring_buffer *buffer,
73dddbb5
SRRH
2854 unsigned long flags,
2855 int skip, int pc, struct pt_regs *regs)
53614991 2856{
2d34f489 2857 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
53614991
SR
2858 return;
2859
73dddbb5 2860 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
2861}
2862
c0a0d0d3
FW
2863void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2864 int pc)
38697053 2865{
a33d7d94
SRV
2866 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2867
2868 if (rcu_is_watching()) {
2869 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2870 return;
2871 }
2872
2873 /*
2874 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2875 * but if the above rcu_is_watching() failed, then the NMI
2876 * triggered someplace critical, and rcu_irq_enter() should
2877 * not be called from NMI.
2878 */
2879 if (unlikely(in_nmi()))
2880 return;
2881
a33d7d94
SRV
2882 rcu_irq_enter_irqson();
2883 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2884 rcu_irq_exit_irqson();
38697053
SR
2885}
2886
03889384
SR
2887/**
2888 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 2889 * @skip: Number of functions to skip (helper handlers)
03889384 2890 */
c142be8e 2891void trace_dump_stack(int skip)
03889384
SR
2892{
2893 unsigned long flags;
2894
2895 if (tracing_disabled || tracing_selftest_running)
e36c5458 2896 return;
03889384
SR
2897
2898 local_save_flags(flags);
2899
2ee5b92a
SRV
2900#ifndef CONFIG_UNWINDER_ORC
2901 /* Skip 1 to skip this function. */
2902 skip++;
2903#endif
c142be8e
SRRH
2904 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2905 flags, skip, preempt_count(), NULL);
03889384 2906}
da387e5c 2907EXPORT_SYMBOL_GPL(trace_dump_stack);
03889384 2908
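/*
 * Illustrative use of trace_dump_stack(): drop the current kernel stack
 * trace into the global trace buffer from an arbitrary code path.  A skip
 * of 0 keeps every caller; a positive value hides that many helper
 * frames.  The surrounding function is hypothetical.
 */
static void example_debug_path(void)
{
	/* record how we got here in the ftrace ring buffer */
	trace_dump_stack(0);
}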
c438f140 2909#ifdef CONFIG_USER_STACKTRACE_SUPPORT
91e86e56
SR
2910static DEFINE_PER_CPU(int, user_stack_count);
2911
c438f140 2912static void
e77405ad 2913ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 2914{
2425bcb9 2915 struct trace_event_call *call = &event_user_stack;
8d7c6a96 2916 struct ring_buffer_event *event;
02b67518 2917 struct userstack_entry *entry;
02b67518 2918
983f938a 2919 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
02b67518
TE
2920 return;
2921
b6345879
SR
2922 /*
 2923 * NMIs cannot handle page faults, even with fixups.
 2924 * Saving the user stack can (and often does) fault.
2925 */
2926 if (unlikely(in_nmi()))
2927 return;
02b67518 2928
91e86e56
SR
2929 /*
2930 * prevent recursion, since the user stack tracing may
2931 * trigger other kernel events.
2932 */
2933 preempt_disable();
2934 if (__this_cpu_read(user_stack_count))
2935 goto out;
2936
2937 __this_cpu_inc(user_stack_count);
2938
3e9a8aad
SRRH
2939 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2940 sizeof(*entry), flags, pc);
02b67518 2941 if (!event)
1dbd1951 2942 goto out_drop_count;
02b67518 2943 entry = ring_buffer_event_data(event);
02b67518 2944
48659d31 2945 entry->tgid = current->tgid;
02b67518
TE
2946 memset(&entry->caller, 0, sizeof(entry->caller));
2947
ee6dd0db 2948 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
f306cc82 2949 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2950 __buffer_unlock_commit(buffer, event);
91e86e56 2951
1dbd1951 2952 out_drop_count:
91e86e56 2953 __this_cpu_dec(user_stack_count);
91e86e56
SR
2954 out:
2955 preempt_enable();
02b67518 2956}
c438f140
TG
2957#else /* CONFIG_USER_STACKTRACE_SUPPORT */
2958static void ftrace_trace_userstack(struct ring_buffer *buffer,
2959 unsigned long flags, int pc)
02b67518 2960{
02b67518 2961}
c438f140 2962#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
02b67518 2963
c0a0d0d3
FW
2964#endif /* CONFIG_STACKTRACE */
2965
07d777fe
SR
2966/* created for use with alloc_percpu */
2967struct trace_buffer_struct {
e2ace001
AL
2968 int nesting;
2969 char buffer[4][TRACE_BUF_SIZE];
07d777fe
SR
2970};
2971
2972static struct trace_buffer_struct *trace_percpu_buffer;
07d777fe
SR
2973
2974/*
e2ace001
AL
 2975 * This allows for lockless recording. If we're nested too deeply, then
2976 * this returns NULL.
07d777fe
SR
2977 */
2978static char *get_trace_buf(void)
2979{
e2ace001 2980 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
07d777fe 2981
e2ace001 2982 if (!buffer || buffer->nesting >= 4)
07d777fe
SR
2983 return NULL;
2984
3d9622c1
SRV
2985 buffer->nesting++;
2986
2987 /* Interrupts must see nesting incremented before we use the buffer */
2988 barrier();
2989 return &buffer->buffer[buffer->nesting][0];
e2ace001
AL
2990}
2991
2992static void put_trace_buf(void)
2993{
3d9622c1
SRV
2994 /* Don't let the decrement of nesting leak before this */
2995 barrier();
e2ace001 2996 this_cpu_dec(trace_percpu_buffer->nesting);
07d777fe
SR
2997}
2998
2999static int alloc_percpu_trace_buffer(void)
3000{
3001 struct trace_buffer_struct *buffers;
07d777fe
SR
3002
3003 buffers = alloc_percpu(struct trace_buffer_struct);
e2ace001
AL
3004 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
3005 return -ENOMEM;
07d777fe
SR
3006
3007 trace_percpu_buffer = buffers;
07d777fe 3008 return 0;
07d777fe
SR
3009}
3010
81698831
SR
3011static int buffers_allocated;
3012
07d777fe
SR
3013void trace_printk_init_buffers(void)
3014{
07d777fe
SR
3015 if (buffers_allocated)
3016 return;
3017
3018 if (alloc_percpu_trace_buffer())
3019 return;
3020
2184db46
SR
3021 /* trace_printk() is for debug use only. Don't use it in production. */
3022
a395d6a7
JP
3023 pr_warn("\n");
3024 pr_warn("**********************************************************\n");
3025 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3026 pr_warn("** **\n");
3027 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3028 pr_warn("** **\n");
3029 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3030 pr_warn("** unsafe for production use. **\n");
3031 pr_warn("** **\n");
3032 pr_warn("** If you see this message and you are not debugging **\n");
3033 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3034 pr_warn("** **\n");
3035 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3036 pr_warn("**********************************************************\n");
07d777fe 3037
b382ede6
SR
3038 /* Expand the buffers to set size */
3039 tracing_update_buffers();
3040
07d777fe 3041 buffers_allocated = 1;
81698831
SR
3042
3043 /*
3044 * trace_printk_init_buffers() can be called by modules.
3045 * If that happens, then we need to start cmdline recording
3046 * directly here. If the global_trace.buffer is already
3047 * allocated here, then this was called by module code.
3048 */
12883efb 3049 if (global_trace.trace_buffer.buffer)
81698831
SR
3050 tracing_start_cmdline_record();
3051}
f45d1225 3052EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
81698831
SR
3053
3054void trace_printk_start_comm(void)
3055{
3056 /* Start tracing comms if trace printk is set */
3057 if (!buffers_allocated)
3058 return;
3059 tracing_start_cmdline_record();
3060}
3061
3062static void trace_printk_start_stop_comm(int enabled)
3063{
3064 if (!buffers_allocated)
3065 return;
3066
3067 if (enabled)
3068 tracing_start_cmdline_record();
3069 else
3070 tracing_stop_cmdline_record();
07d777fe
SR
3071}
3072
769b0441 3073/**
48ead020 3074 * trace_vbprintk - write a binary message to the tracing buffer
769b0441
FW
3075 *
3076 */
40ce74f1 3077int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 3078{
2425bcb9 3079 struct trace_event_call *call = &event_bprint;
769b0441 3080 struct ring_buffer_event *event;
e77405ad 3081 struct ring_buffer *buffer;
769b0441 3082 struct trace_array *tr = &global_trace;
48ead020 3083 struct bprint_entry *entry;
769b0441 3084 unsigned long flags;
07d777fe
SR
3085 char *tbuffer;
3086 int len = 0, size, pc;
769b0441
FW
3087
3088 if (unlikely(tracing_selftest_running || tracing_disabled))
3089 return 0;
3090
3091 /* Don't pollute graph traces with trace_vprintk internals */
3092 pause_graph_tracing();
3093
3094 pc = preempt_count();
5168ae50 3095 preempt_disable_notrace();
769b0441 3096
07d777fe
SR
3097 tbuffer = get_trace_buf();
3098 if (!tbuffer) {
3099 len = 0;
e2ace001 3100 goto out_nobuffer;
07d777fe 3101 }
769b0441 3102
07d777fe 3103 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 3104
07d777fe
SR
3105 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3106 goto out;
769b0441 3107
07d777fe 3108 local_save_flags(flags);
769b0441 3109 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 3110 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
3111 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3112 flags, pc);
769b0441 3113 if (!event)
07d777fe 3114 goto out;
769b0441
FW
3115 entry = ring_buffer_event_data(event);
3116 entry->ip = ip;
769b0441
FW
3117 entry->fmt = fmt;
3118
07d777fe 3119 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 3120 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 3121 __buffer_unlock_commit(buffer, event);
2d34f489 3122 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
d931369b 3123 }
769b0441 3124
769b0441 3125out:
e2ace001
AL
3126 put_trace_buf();
3127
3128out_nobuffer:
5168ae50 3129 preempt_enable_notrace();
769b0441
FW
3130 unpause_graph_tracing();
3131
3132 return len;
3133}
48ead020
FW
3134EXPORT_SYMBOL_GPL(trace_vbprintk);
3135
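/*
 * Illustrative sketch of a call site that ends up in trace_vbprintk():
 * the trace_printk() macro takes the binary fast path when the format
 * string allows it, packing the arguments with vbin_printf() as above.
 * The function and variable names are hypothetical; trace_printk() is
 * for debugging only and must not ship in production code.
 */
static void example_debug_event(int cpu, u64 delta_ns)
{
	trace_printk("cpu %d saw a delta of %llu ns\n", cpu, delta_ns);
}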
26b68dd2 3136__printf(3, 0)
12883efb
SRRH
3137static int
3138__trace_array_vprintk(struct ring_buffer *buffer,
3139 unsigned long ip, const char *fmt, va_list args)
48ead020 3140{
2425bcb9 3141 struct trace_event_call *call = &event_print;
48ead020 3142 struct ring_buffer_event *event;
07d777fe 3143 int len = 0, size, pc;
48ead020 3144 struct print_entry *entry;
07d777fe
SR
3145 unsigned long flags;
3146 char *tbuffer;
48ead020
FW
3147
3148 if (tracing_disabled || tracing_selftest_running)
3149 return 0;
3150
07d777fe
SR
3151 /* Don't pollute graph traces with trace_vprintk internals */
3152 pause_graph_tracing();
3153
48ead020
FW
3154 pc = preempt_count();
3155 preempt_disable_notrace();
48ead020 3156
07d777fe
SR
3157
3158 tbuffer = get_trace_buf();
3159 if (!tbuffer) {
3160 len = 0;
e2ace001 3161 goto out_nobuffer;
07d777fe 3162 }
48ead020 3163
3558a5ac 3164 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 3165
07d777fe 3166 local_save_flags(flags);
48ead020 3167 size = sizeof(*entry) + len + 1;
3e9a8aad
SRRH
3168 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3169 flags, pc);
48ead020 3170 if (!event)
07d777fe 3171 goto out;
48ead020 3172 entry = ring_buffer_event_data(event);
c13d2f7c 3173 entry->ip = ip;
48ead020 3174
3558a5ac 3175 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 3176 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 3177 __buffer_unlock_commit(buffer, event);
2d34f489 3178 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
d931369b 3179 }
e2ace001
AL
3180
3181out:
3182 put_trace_buf();
3183
3184out_nobuffer:
48ead020 3185 preempt_enable_notrace();
07d777fe 3186 unpause_graph_tracing();
48ead020
FW
3187
3188 return len;
3189}
659372d3 3190
26b68dd2 3191__printf(3, 0)
12883efb
SRRH
3192int trace_array_vprintk(struct trace_array *tr,
3193 unsigned long ip, const char *fmt, va_list args)
3194{
3195 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3196}
3197
26b68dd2 3198__printf(3, 0)
12883efb
SRRH
3199int trace_array_printk(struct trace_array *tr,
3200 unsigned long ip, const char *fmt, ...)
3201{
3202 int ret;
3203 va_list ap;
3204
983f938a 3205 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
3206 return 0;
3207
3208 va_start(ap, fmt);
3209 ret = trace_array_vprintk(tr, ip, fmt, ap);
3210 va_end(ap);
3211 return ret;
3212}
f45d1225 3213EXPORT_SYMBOL_GPL(trace_array_printk);
12883efb 3214
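/*
 * Illustrative sketch of writing a formatted message into a specific
 * trace instance with trace_array_printk().  The trace_array pointer is
 * assumed to come from an instance the caller already owns; _THIS_IP_
 * records the caller's address as the event's instruction pointer.  The
 * wrapper below is hypothetical.
 */
static void example_log_to_instance(struct trace_array *tr, int value)
{
	trace_array_printk(tr, _THIS_IP_, "example value: %d\n", value);
}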
26b68dd2 3215__printf(3, 4)
12883efb
SRRH
3216int trace_array_printk_buf(struct ring_buffer *buffer,
3217 unsigned long ip, const char *fmt, ...)
3218{
3219 int ret;
3220 va_list ap;
3221
983f938a 3222 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
3223 return 0;
3224
3225 va_start(ap, fmt);
3226 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3227 va_end(ap);
3228 return ret;
3229}
3230
26b68dd2 3231__printf(2, 0)
659372d3
SR
3232int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3233{
a813a159 3234 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 3235}
769b0441
FW
3236EXPORT_SYMBOL_GPL(trace_vprintk);
3237
e2ac8ef5 3238static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 3239{
6d158a81
SR
3240 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3241
5a90f577 3242 iter->idx++;
6d158a81
SR
3243 if (buf_iter)
3244 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
3245}
3246
e309b41d 3247static struct trace_entry *
bc21b478
SR
3248peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3249 unsigned long *lost_events)
dd0e545f 3250{
3928a8a2 3251 struct ring_buffer_event *event;
6d158a81 3252 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 3253
d769041f
SR
3254 if (buf_iter)
3255 event = ring_buffer_iter_peek(buf_iter, ts);
3256 else
12883efb 3257 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 3258 lost_events);
d769041f 3259
4a9bd3f1
SR
3260 if (event) {
3261 iter->ent_size = ring_buffer_event_length(event);
3262 return ring_buffer_event_data(event);
3263 }
3264 iter->ent_size = 0;
3265 return NULL;
dd0e545f 3266}
d769041f 3267
dd0e545f 3268static struct trace_entry *
bc21b478
SR
3269__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3270 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 3271{
12883efb 3272 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 3273 struct trace_entry *ent, *next = NULL;
aa27497c 3274 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 3275 int cpu_file = iter->cpu_file;
3928a8a2 3276 u64 next_ts = 0, ts;
bc0c38d1 3277 int next_cpu = -1;
12b5da34 3278 int next_size = 0;
bc0c38d1
SR
3279 int cpu;
3280
b04cc6b1
FW
3281 /*
 3282 * If we are in a per_cpu trace file, don't bother iterating over
 3283 * all the CPUs; peek directly.
3284 */
ae3b5093 3285 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
3286 if (ring_buffer_empty_cpu(buffer, cpu_file))
3287 return NULL;
bc21b478 3288 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
3289 if (ent_cpu)
3290 *ent_cpu = cpu_file;
3291
3292 return ent;
3293 }
3294
ab46428c 3295 for_each_tracing_cpu(cpu) {
dd0e545f 3296
3928a8a2
SR
3297 if (ring_buffer_empty_cpu(buffer, cpu))
3298 continue;
dd0e545f 3299
bc21b478 3300 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 3301
cdd31cd2
IM
3302 /*
3303 * Pick the entry with the smallest timestamp:
3304 */
3928a8a2 3305 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
3306 next = ent;
3307 next_cpu = cpu;
3928a8a2 3308 next_ts = ts;
bc21b478 3309 next_lost = lost_events;
12b5da34 3310 next_size = iter->ent_size;
bc0c38d1
SR
3311 }
3312 }
3313
12b5da34
SR
3314 iter->ent_size = next_size;
3315
bc0c38d1
SR
3316 if (ent_cpu)
3317 *ent_cpu = next_cpu;
3318
3928a8a2
SR
3319 if (ent_ts)
3320 *ent_ts = next_ts;
3321
bc21b478
SR
3322 if (missing_events)
3323 *missing_events = next_lost;
3324
bc0c38d1
SR
3325 return next;
3326}
3327
dd0e545f 3328/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
3329struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3330 int *ent_cpu, u64 *ent_ts)
bc0c38d1 3331{
bc21b478 3332 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
3333}
3334
3335/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 3336void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 3337{
bc21b478
SR
3338 iter->ent = __find_next_entry(iter, &iter->cpu,
3339 &iter->lost_events, &iter->ts);
dd0e545f 3340
3928a8a2 3341 if (iter->ent)
e2ac8ef5 3342 trace_iterator_increment(iter);
dd0e545f 3343
3928a8a2 3344 return iter->ent ? iter : NULL;
b3806b43 3345}
bc0c38d1 3346
e309b41d 3347static void trace_consume(struct trace_iterator *iter)
b3806b43 3348{
12883efb 3349 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 3350 &iter->lost_events);
bc0c38d1
SR
3351}
3352
e309b41d 3353static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
3354{
3355 struct trace_iterator *iter = m->private;
bc0c38d1 3356 int i = (int)*pos;
4e3c3333 3357 void *ent;
bc0c38d1 3358
a63ce5b3
SR
3359 WARN_ON_ONCE(iter->leftover);
3360
bc0c38d1
SR
3361 (*pos)++;
3362
3363 /* can't go backwards */
3364 if (iter->idx > i)
3365 return NULL;
3366
3367 if (iter->idx < 0)
955b61e5 3368 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3369 else
3370 ent = iter;
3371
3372 while (ent && iter->idx < i)
955b61e5 3373 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3374
3375 iter->pos = *pos;
3376
bc0c38d1
SR
3377 return ent;
3378}
3379
955b61e5 3380void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 3381{
2f26ebd5
SR
3382 struct ring_buffer_event *event;
3383 struct ring_buffer_iter *buf_iter;
3384 unsigned long entries = 0;
3385 u64 ts;
3386
12883efb 3387 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 3388
6d158a81
SR
3389 buf_iter = trace_buffer_iter(iter, cpu);
3390 if (!buf_iter)
2f26ebd5
SR
3391 return;
3392
2f26ebd5
SR
3393 ring_buffer_iter_reset(buf_iter);
3394
3395 /*
3396 * We could have the case with the max latency tracers
3397 * that a reset never took place on a cpu. This is evident
3398 * by the timestamp being before the start of the buffer.
3399 */
3400 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 3401 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
3402 break;
3403 entries++;
3404 ring_buffer_read(buf_iter, NULL);
3405 }
3406
12883efb 3407 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
3408}
3409
d7350c3f 3410/*
d7350c3f
FW
 3411 * The current tracer is copied to avoid taking a global
 3412 * lock all around.
3413 */
bc0c38d1
SR
3414static void *s_start(struct seq_file *m, loff_t *pos)
3415{
3416 struct trace_iterator *iter = m->private;
2b6080f2 3417 struct trace_array *tr = iter->tr;
b04cc6b1 3418 int cpu_file = iter->cpu_file;
bc0c38d1
SR
3419 void *p = NULL;
3420 loff_t l = 0;
3928a8a2 3421 int cpu;
bc0c38d1 3422
2fd196ec
HT
3423 /*
3424 * copy the tracer to avoid using a global lock all around.
3425 * iter->trace is a copy of current_trace, the pointer to the
3426 * name may be used instead of a strcmp(), as iter->trace->name
3427 * will point to the same string as current_trace->name.
3428 */
bc0c38d1 3429 mutex_lock(&trace_types_lock);
2b6080f2
SR
3430 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3431 *iter->trace = *tr->current_trace;
d7350c3f 3432 mutex_unlock(&trace_types_lock);
bc0c38d1 3433
12883efb 3434#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3435 if (iter->snapshot && iter->trace->use_max_tr)
3436 return ERR_PTR(-EBUSY);
12883efb 3437#endif
debdd57f
HT
3438
3439 if (!iter->snapshot)
d914ba37 3440 atomic_inc(&trace_record_taskinfo_disabled);
bc0c38d1 3441
bc0c38d1
SR
3442 if (*pos != iter->pos) {
3443 iter->ent = NULL;
3444 iter->cpu = 0;
3445 iter->idx = -1;
3446
ae3b5093 3447 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3448 for_each_tracing_cpu(cpu)
2f26ebd5 3449 tracing_iter_reset(iter, cpu);
b04cc6b1 3450 } else
2f26ebd5 3451 tracing_iter_reset(iter, cpu_file);
bc0c38d1 3452
ac91d854 3453 iter->leftover = 0;
bc0c38d1
SR
3454 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3455 ;
3456
3457 } else {
a63ce5b3
SR
3458 /*
3459 * If we overflowed the seq_file before, then we want
3460 * to just reuse the trace_seq buffer again.
3461 */
3462 if (iter->leftover)
3463 p = iter;
3464 else {
3465 l = *pos - 1;
3466 p = s_next(m, p, &l);
3467 }
bc0c38d1
SR
3468 }
3469
4f535968 3470 trace_event_read_lock();
7e53bd42 3471 trace_access_lock(cpu_file);
bc0c38d1
SR
3472 return p;
3473}
3474
3475static void s_stop(struct seq_file *m, void *p)
3476{
7e53bd42
LJ
3477 struct trace_iterator *iter = m->private;
3478
12883efb 3479#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3480 if (iter->snapshot && iter->trace->use_max_tr)
3481 return;
12883efb 3482#endif
debdd57f
HT
3483
3484 if (!iter->snapshot)
d914ba37 3485 atomic_dec(&trace_record_taskinfo_disabled);
12883efb 3486
7e53bd42 3487 trace_access_unlock(iter->cpu_file);
4f535968 3488 trace_event_read_unlock();
bc0c38d1
SR
3489}
3490
ecffc8a8
DA
3491static void
3492get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
3493 unsigned long *entries, int cpu)
3494{
3495 unsigned long count;
3496
3497 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3498 /*
3499 * If this buffer has skipped entries, then we hold all
3500 * entries for the trace and we need to ignore the
3501 * ones before the time stamp.
3502 */
3503 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3504 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3505 /* total is the same as the entries */
3506 *total = count;
3507 } else
3508 *total = count +
3509 ring_buffer_overrun_cpu(buf->buffer, cpu);
3510 *entries = count;
3511}
3512
39eaf7ef 3513static void
12883efb
SRRH
3514get_total_entries(struct trace_buffer *buf,
3515 unsigned long *total, unsigned long *entries)
39eaf7ef 3516{
ecffc8a8 3517 unsigned long t, e;
39eaf7ef
SR
3518 int cpu;
3519
3520 *total = 0;
3521 *entries = 0;
3522
3523 for_each_tracing_cpu(cpu) {
ecffc8a8
DA
3524 get_total_entries_cpu(buf, &t, &e, cpu);
3525 *total += t;
3526 *entries += e;
39eaf7ef
SR
3527 }
3528}
3529
ecffc8a8
DA
3530unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3531{
3532 unsigned long total, entries;
3533
3534 if (!tr)
3535 tr = &global_trace;
3536
3537 get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
3538
3539 return entries;
3540}
3541
3542unsigned long trace_total_entries(struct trace_array *tr)
3543{
3544 unsigned long total, entries;
3545
3546 if (!tr)
3547 tr = &global_trace;
3548
3549 get_total_entries(&tr->trace_buffer, &total, &entries);
3550
3551 return entries;
3552}
3553
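/*
 * Illustrative use of the entry counters above: report how many events
 * the global trace buffer currently holds.  Passing NULL selects
 * global_trace, exactly as the helpers themselves do.  The function is
 * hypothetical.
 */
static void example_report_buffer_usage(void)
{
	pr_info("ftrace currently holds %lu events\n",
		trace_total_entries(NULL));
}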
e309b41d 3554static void print_lat_help_header(struct seq_file *m)
bc0c38d1 3555{
d79ac28f
RV
3556 seq_puts(m, "# _------=> CPU# \n"
3557 "# / _-----=> irqs-off \n"
3558 "# | / _----=> need-resched \n"
3559 "# || / _---=> hardirq/softirq \n"
3560 "# ||| / _--=> preempt-depth \n"
3561 "# |||| / delay \n"
3562 "# cmd pid ||||| time | caller \n"
3563 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
3564}
3565
12883efb 3566static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 3567{
39eaf7ef
SR
3568 unsigned long total;
3569 unsigned long entries;
3570
12883efb 3571 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
3572 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3573 entries, total, num_online_cpus());
3574 seq_puts(m, "#\n");
3575}
3576
441dae8f
JF
3577static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3578 unsigned int flags)
39eaf7ef 3579{
441dae8f
JF
3580 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3581
12883efb 3582 print_event_info(buf, m);
441dae8f 3583
f8494fa3
JFG
3584 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3585 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
bc0c38d1
SR
3586}
3587
441dae8f
JF
3588static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3589 unsigned int flags)
77271ce4 3590{
441dae8f 3591 bool tgid = flags & TRACE_ITER_RECORD_TGID;
0f5e5a3a
RV
3592 const char *space = " ";
3593 int prec = tgid ? 10 : 2;
b11fb737 3594
9e738215
QP
3595 print_event_info(buf, m);
3596
0f5e5a3a
RV
3597 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3598 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3599 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3600 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3601 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3602 seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3603 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
77271ce4 3604}
bc0c38d1 3605
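/*
 * Print the latency-format banner: tracer name and kernel release,
 * latency and entry statistics, the task that hit the maximum latency,
 * and (when recorded) where the critical section started and ended.
 */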
62b915f1 3606void
bc0c38d1
SR
3607print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3608{
983f938a 3609 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
3610 struct trace_buffer *buf = iter->trace_buffer;
3611 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 3612 struct tracer *type = iter->trace;
39eaf7ef
SR
3613 unsigned long entries;
3614 unsigned long total;
bc0c38d1
SR
3615 const char *name = "preemption";
3616
d840f718 3617 name = type->name;
bc0c38d1 3618
12883efb 3619 get_total_entries(buf, &total, &entries);
bc0c38d1 3620
888b55dc 3621 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 3622 name, UTS_RELEASE);
888b55dc 3623 seq_puts(m, "# -----------------------------------"
bc0c38d1 3624 "---------------------------------\n");
888b55dc 3625 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 3626 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 3627 nsecs_to_usecs(data->saved_latency),
bc0c38d1 3628 entries,
4c11d7ae 3629 total,
12883efb 3630 buf->cpu,
bc0c38d1
SR
3631#if defined(CONFIG_PREEMPT_NONE)
3632 "server",
3633#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3634 "desktop",
b5c21b45 3635#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
3636 "preempt",
3637#else
3638 "unknown",
3639#endif
3640 /* These are reserved for later use */
3641 0, 0, 0, 0);
3642#ifdef CONFIG_SMP
3643 seq_printf(m, " #P:%d)\n", num_online_cpus());
3644#else
3645 seq_puts(m, ")\n");
3646#endif
888b55dc
KM
3647 seq_puts(m, "# -----------------\n");
3648 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 3649 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
3650 data->comm, data->pid,
3651 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 3652 data->policy, data->rt_priority);
888b55dc 3653 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
3654
3655 if (data->critical_start) {
888b55dc 3656 seq_puts(m, "# => started at: ");
214023c3
SR
3657 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3658 trace_print_seq(m, &iter->seq);
888b55dc 3659 seq_puts(m, "\n# => ended at: ");
214023c3
SR
3660 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3661 trace_print_seq(m, &iter->seq);
8248ac05 3662 seq_puts(m, "\n#\n");
bc0c38d1
SR
3663 }
3664
888b55dc 3665 seq_puts(m, "#\n");
bc0c38d1
SR
3666}
3667
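/*
 * With the "annotate" option set and a buffer that overflowed, emit a
 * "CPU n buffer started" marker the first time output switches to that
 * CPU, so the reader knows earlier events from it were dropped.
 */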
a309720c
SR
3668static void test_cpu_buff_start(struct trace_iterator *iter)
3669{
3670 struct trace_seq *s = &iter->seq;
983f938a 3671 struct trace_array *tr = iter->tr;
a309720c 3672
983f938a 3673 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
12ef7d44
SR
3674 return;
3675
3676 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3677 return;
3678
4dbbe2d8
MK
3679 if (cpumask_available(iter->started) &&
3680 cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
3681 return;
3682
12883efb 3683 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
3684 return;
3685
4dbbe2d8 3686 if (cpumask_available(iter->started))
919cd979 3687 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
3688
3689 /* Don't print started cpu buffer for the first entry of the trace */
3690 if (iter->idx > 1)
3691 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3692 iter->cpu);
a309720c
SR
3693}
3694
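/*
 * Default human-readable output: print the context columns (unless
 * context-info is disabled), then hand the entry to the trace_event
 * registered for its type.
 */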
2c4f035f 3695static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 3696{
983f938a 3697 struct trace_array *tr = iter->tr;
214023c3 3698 struct trace_seq *s = &iter->seq;
983f938a 3699 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 3700 struct trace_entry *entry;
f633cef0 3701 struct trace_event *event;
bc0c38d1 3702
4e3c3333 3703 entry = iter->ent;
dd0e545f 3704
a309720c
SR
3705 test_cpu_buff_start(iter);
3706
c4a8e8be 3707 event = ftrace_find_event(entry->type);
bc0c38d1 3708
983f938a 3709 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3710 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3711 trace_print_lat_context(iter);
3712 else
3713 trace_print_context(iter);
c4a8e8be 3714 }
bc0c38d1 3715
19a7fe20
SRRH
3716 if (trace_seq_has_overflowed(s))
3717 return TRACE_TYPE_PARTIAL_LINE;
3718
268ccda0 3719 if (event)
a9a57763 3720 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 3721
19a7fe20 3722 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 3723
19a7fe20 3724 return trace_handle_return(s);
bc0c38d1
SR
3725}
3726
2c4f035f 3727static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3 3728{
983f938a 3729 struct trace_array *tr = iter->tr;
f9896bf3
IM
3730 struct trace_seq *s = &iter->seq;
3731 struct trace_entry *entry;
f633cef0 3732 struct trace_event *event;
f9896bf3
IM
3733
3734 entry = iter->ent;
dd0e545f 3735
983f938a 3736 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
19a7fe20
SRRH
3737 trace_seq_printf(s, "%d %d %llu ",
3738 entry->pid, iter->cpu, iter->ts);
3739
3740 if (trace_seq_has_overflowed(s))
3741 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 3742
f633cef0 3743 event = ftrace_find_event(entry->type);
268ccda0 3744 if (event)
a9a57763 3745 return event->funcs->raw(iter, 0, event);
d9793bd8 3746
19a7fe20 3747 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 3748
19a7fe20 3749 return trace_handle_return(s);
f9896bf3
IM
3750}
3751
2c4f035f 3752static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec 3753{
983f938a 3754 struct trace_array *tr = iter->tr;
5e3ca0ec
IM
3755 struct trace_seq *s = &iter->seq;
3756 unsigned char newline = '\n';
3757 struct trace_entry *entry;
f633cef0 3758 struct trace_event *event;
5e3ca0ec
IM
3759
3760 entry = iter->ent;
dd0e545f 3761
983f938a 3762 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3763 SEQ_PUT_HEX_FIELD(s, entry->pid);
3764 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3765 SEQ_PUT_HEX_FIELD(s, iter->ts);
3766 if (trace_seq_has_overflowed(s))
3767 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3768 }
5e3ca0ec 3769
f633cef0 3770 event = ftrace_find_event(entry->type);
268ccda0 3771 if (event) {
a9a57763 3772 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
3773 if (ret != TRACE_TYPE_HANDLED)
3774 return ret;
3775 }
7104f300 3776
19a7fe20 3777 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 3778
19a7fe20 3779 return trace_handle_return(s);
5e3ca0ec
IM
3780}
3781
2c4f035f 3782static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa 3783{
983f938a 3784 struct trace_array *tr = iter->tr;
cb0f12aa
IM
3785 struct trace_seq *s = &iter->seq;
3786 struct trace_entry *entry;
f633cef0 3787 struct trace_event *event;
cb0f12aa
IM
3788
3789 entry = iter->ent;
dd0e545f 3790
983f938a 3791 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3792 SEQ_PUT_FIELD(s, entry->pid);
3793 SEQ_PUT_FIELD(s, iter->cpu);
3794 SEQ_PUT_FIELD(s, iter->ts);
3795 if (trace_seq_has_overflowed(s))
3796 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3797 }
cb0f12aa 3798
f633cef0 3799 event = ftrace_find_event(entry->type);
a9a57763
SR
3800 return event ? event->funcs->binary(iter, 0, event) :
3801 TRACE_TYPE_HANDLED;
cb0f12aa
IM
3802}
3803
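/*
 * Return 1 if there is nothing left to read, checking either the single
 * CPU selected by the iterator or every tracing CPU.
 */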
62b915f1 3804int trace_empty(struct trace_iterator *iter)
bc0c38d1 3805{
6d158a81 3806 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
3807 int cpu;
3808
9aba60fe 3809 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 3810 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 3811 cpu = iter->cpu_file;
6d158a81
SR
3812 buf_iter = trace_buffer_iter(iter, cpu);
3813 if (buf_iter) {
3814 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
3815 return 0;
3816 } else {
12883efb 3817 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
3818 return 0;
3819 }
3820 return 1;
3821 }
3822
ab46428c 3823 for_each_tracing_cpu(cpu) {
6d158a81
SR
3824 buf_iter = trace_buffer_iter(iter, cpu);
3825 if (buf_iter) {
3826 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
3827 return 0;
3828 } else {
12883efb 3829 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
3830 return 0;
3831 }
bc0c38d1 3832 }
d769041f 3833
797d3712 3834 return 1;
bc0c38d1
SR
3835}
3836
4f535968 3837/* Called with trace_event_read_lock() held. */
955b61e5 3838enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 3839{
983f938a
SRRH
3840 struct trace_array *tr = iter->tr;
3841 unsigned long trace_flags = tr->trace_flags;
2c4f035f
FW
3842 enum print_line_t ret;
3843
19a7fe20
SRRH
3844 if (iter->lost_events) {
3845 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3846 iter->cpu, iter->lost_events);
3847 if (trace_seq_has_overflowed(&iter->seq))
3848 return TRACE_TYPE_PARTIAL_LINE;
3849 }
bc21b478 3850
2c4f035f
FW
3851 if (iter->trace && iter->trace->print_line) {
3852 ret = iter->trace->print_line(iter);
3853 if (ret != TRACE_TYPE_UNHANDLED)
3854 return ret;
3855 }
72829bc3 3856
09ae7234
SRRH
3857 if (iter->ent->type == TRACE_BPUTS &&
3858 trace_flags & TRACE_ITER_PRINTK &&
3859 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3860 return trace_print_bputs_msg_only(iter);
3861
48ead020
FW
3862 if (iter->ent->type == TRACE_BPRINT &&
3863 trace_flags & TRACE_ITER_PRINTK &&
3864 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3865 return trace_print_bprintk_msg_only(iter);
48ead020 3866
66896a85
FW
3867 if (iter->ent->type == TRACE_PRINT &&
3868 trace_flags & TRACE_ITER_PRINTK &&
3869 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3870 return trace_print_printk_msg_only(iter);
66896a85 3871
cb0f12aa
IM
3872 if (trace_flags & TRACE_ITER_BIN)
3873 return print_bin_fmt(iter);
3874
5e3ca0ec
IM
3875 if (trace_flags & TRACE_ITER_HEX)
3876 return print_hex_fmt(iter);
3877
f9896bf3
IM
3878 if (trace_flags & TRACE_ITER_RAW)
3879 return print_raw_fmt(iter);
3880
f9896bf3
IM
3881 return print_trace_fmt(iter);
3882}
3883
7e9a49ef
JO
3884void trace_latency_header(struct seq_file *m)
3885{
3886 struct trace_iterator *iter = m->private;
983f938a 3887 struct trace_array *tr = iter->tr;
7e9a49ef
JO
3888
3889 /* print nothing if the buffers are empty */
3890 if (trace_empty(iter))
3891 return;
3892
3893 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3894 print_trace_header(m, iter);
3895
983f938a 3896 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
7e9a49ef
JO
3897 print_lat_help_header(m);
3898}
3899
62b915f1
JO
3900void trace_default_header(struct seq_file *m)
3901{
3902 struct trace_iterator *iter = m->private;
983f938a
SRRH
3903 struct trace_array *tr = iter->tr;
3904 unsigned long trace_flags = tr->trace_flags;
62b915f1 3905
f56e7f8e
JO
3906 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3907 return;
3908
62b915f1
JO
3909 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3910 /* print nothing if the buffers are empty */
3911 if (trace_empty(iter))
3912 return;
3913 print_trace_header(m, iter);
3914 if (!(trace_flags & TRACE_ITER_VERBOSE))
3915 print_lat_help_header(m);
3916 } else {
77271ce4
SR
3917 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3918 if (trace_flags & TRACE_ITER_IRQ_INFO)
441dae8f
JF
3919 print_func_help_header_irq(iter->trace_buffer,
3920 m, trace_flags);
77271ce4 3921 else
441dae8f
JF
3922 print_func_help_header(iter->trace_buffer, m,
3923 trace_flags);
77271ce4 3924 }
62b915f1
JO
3925 }
3926}
3927
e0a413f6
SR
3928static void test_ftrace_alive(struct seq_file *m)
3929{
3930 if (!ftrace_is_dead())
3931 return;
d79ac28f
RV
3932 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3933 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
3934}
3935
d8741e2e 3936#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 3937static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 3938{
d79ac28f
RV
3939 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3940 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3941 "# Takes a snapshot of the main buffer.\n"
3942 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
 3943 "# (Doesn't have to be '2'; works with any number that\n"
3944 "# is not a '0' or '1')\n");
d8741e2e 3945}
f1affcaa
SRRH
3946
3947static void show_snapshot_percpu_help(struct seq_file *m)
3948{
fa6f0cc7 3949 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 3950#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
3951 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3952 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 3953#else
d79ac28f
RV
3954 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3955 "# Must use main snapshot file to allocate.\n");
f1affcaa 3956#endif
d79ac28f
RV
3957 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
 3958 "# (Doesn't have to be '2'; works with any number that\n"
3959 "# is not a '0' or '1')\n");
f1affcaa
SRRH
3960}
3961
d8741e2e
SRRH
3962static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3963{
45ad21ca 3964 if (iter->tr->allocated_snapshot)
fa6f0cc7 3965 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 3966 else
fa6f0cc7 3967 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 3968
fa6f0cc7 3969 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
3970 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3971 show_snapshot_main_help(m);
3972 else
3973 show_snapshot_percpu_help(m);
d8741e2e
SRRH
3974}
3975#else
3976/* Should never be called */
3977static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3978#endif
3979
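/*
 * seq_file ->show() callback for "trace": print the header while there
 * is no current entry, replay output left over from a previous
 * overflowed read, or format the next trace line.
 */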
bc0c38d1
SR
3980static int s_show(struct seq_file *m, void *v)
3981{
3982 struct trace_iterator *iter = v;
a63ce5b3 3983 int ret;
bc0c38d1
SR
3984
3985 if (iter->ent == NULL) {
3986 if (iter->tr) {
3987 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3988 seq_puts(m, "#\n");
e0a413f6 3989 test_ftrace_alive(m);
bc0c38d1 3990 }
d8741e2e
SRRH
3991 if (iter->snapshot && trace_empty(iter))
3992 print_snapshot_help(m, iter);
3993 else if (iter->trace && iter->trace->print_header)
8bba1bf5 3994 iter->trace->print_header(m);
62b915f1
JO
3995 else
3996 trace_default_header(m);
3997
a63ce5b3
SR
3998 } else if (iter->leftover) {
3999 /*
4000 * If we filled the seq_file buffer earlier, we
4001 * want to just show it now.
4002 */
4003 ret = trace_print_seq(m, &iter->seq);
4004
4005 /* ret should this time be zero, but you never know */
4006 iter->leftover = ret;
4007
bc0c38d1 4008 } else {
f9896bf3 4009 print_trace_line(iter);
a63ce5b3
SR
4010 ret = trace_print_seq(m, &iter->seq);
4011 /*
4012 * If we overflow the seq_file buffer, then it will
4013 * ask us for this data again at start up.
4014 * Use that instead.
4015 * ret is 0 if seq_file write succeeded.
4016 * -1 otherwise.
4017 */
4018 iter->leftover = ret;
bc0c38d1
SR
4019 }
4020
4021 return 0;
4022}
4023
649e9c70
ON
4024/*
4025 * Should be used after trace_array_get(), trace_types_lock
4026 * ensures that i_cdev was already initialized.
4027 */
4028static inline int tracing_get_cpu(struct inode *inode)
4029{
4030 if (inode->i_cdev) /* See trace_create_cpu_file() */
4031 return (long)inode->i_cdev - 1;
4032 return RING_BUFFER_ALL_CPUS;
4033}
4034
88e9d34c 4035static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
4036 .start = s_start,
4037 .next = s_next,
4038 .stop = s_stop,
4039 .show = s_show,
bc0c38d1
SR
4040};
4041
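/*
 * Set up a trace_iterator for reading "trace": copy the current tracer,
 * pick the main or snapshot buffer, create a ring-buffer iterator per
 * CPU, and pause tracing unless the snapshot file is being opened.
 */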
e309b41d 4042static struct trace_iterator *
6484c71c 4043__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 4044{
6484c71c 4045 struct trace_array *tr = inode->i_private;
bc0c38d1 4046 struct trace_iterator *iter;
50e18b94 4047 int cpu;
bc0c38d1 4048
85a2f9b4
SR
4049 if (tracing_disabled)
4050 return ERR_PTR(-ENODEV);
60a11774 4051
50e18b94 4052 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
4053 if (!iter)
4054 return ERR_PTR(-ENOMEM);
bc0c38d1 4055
72917235 4056 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 4057 GFP_KERNEL);
93574fcc
DC
4058 if (!iter->buffer_iter)
4059 goto release;
4060
d7350c3f
FW
4061 /*
4062 * We make a copy of the current tracer to avoid concurrent
4063 * changes on it while we are reading.
4064 */
bc0c38d1 4065 mutex_lock(&trace_types_lock);
d7350c3f 4066 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 4067 if (!iter->trace)
d7350c3f 4068 goto fail;
85a2f9b4 4069
2b6080f2 4070 *iter->trace = *tr->current_trace;
d7350c3f 4071
79f55997 4072 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
4073 goto fail;
4074
12883efb
SRRH
4075 iter->tr = tr;
4076
4077#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
4078 /* Currently only the top directory has a snapshot */
4079 if (tr->current_trace->print_max || snapshot)
12883efb 4080 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 4081 else
12883efb
SRRH
4082#endif
4083 iter->trace_buffer = &tr->trace_buffer;
debdd57f 4084 iter->snapshot = snapshot;
bc0c38d1 4085 iter->pos = -1;
6484c71c 4086 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 4087 mutex_init(&iter->mutex);
bc0c38d1 4088
8bba1bf5
MM
4089 /* Notify the tracer early; before we stop tracing. */
4090 if (iter->trace && iter->trace->open)
a93751ca 4091 iter->trace->open(iter);
8bba1bf5 4092
12ef7d44 4093 /* Annotate start of buffers if we had overruns */
12883efb 4094 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
4095 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4096
8be0709f 4097 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 4098 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
4099 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4100
debdd57f
HT
4101 /* stop the trace while dumping if we are not opening "snapshot" */
4102 if (!iter->snapshot)
2b6080f2 4103 tracing_stop_tr(tr);
2f26ebd5 4104
ae3b5093 4105 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 4106 for_each_tracing_cpu(cpu) {
b04cc6b1 4107 iter->buffer_iter[cpu] =
31b265b3
DA
4108 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4109 cpu, GFP_KERNEL);
72c9ddfd
DM
4110 }
4111 ring_buffer_read_prepare_sync();
4112 for_each_tracing_cpu(cpu) {
4113 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 4114 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
4115 }
4116 } else {
4117 cpu = iter->cpu_file;
3928a8a2 4118 iter->buffer_iter[cpu] =
31b265b3
DA
4119 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4120 cpu, GFP_KERNEL);
72c9ddfd
DM
4121 ring_buffer_read_prepare_sync();
4122 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 4123 tracing_iter_reset(iter, cpu);
3928a8a2
SR
4124 }
4125
bc0c38d1
SR
4126 mutex_unlock(&trace_types_lock);
4127
bc0c38d1 4128 return iter;
3928a8a2 4129
d7350c3f 4130 fail:
3928a8a2 4131 mutex_unlock(&trace_types_lock);
d7350c3f 4132 kfree(iter->trace);
6d158a81 4133 kfree(iter->buffer_iter);
93574fcc 4134release:
50e18b94
JO
4135 seq_release_private(inode, file);
4136 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
4137}
4138
4139int tracing_open_generic(struct inode *inode, struct file *filp)
4140{
60a11774
SR
4141 if (tracing_disabled)
4142 return -ENODEV;
4143
bc0c38d1
SR
4144 filp->private_data = inode->i_private;
4145 return 0;
4146}
4147
2e86421d
GB
4148bool tracing_is_disabled(void)
4149{
 4150 return (tracing_disabled) ? true : false;
4151}
4152
7b85af63
SRRH
4153/*
4154 * Open and update trace_array ref count.
4155 * Must have the current trace_array passed to it.
4156 */
dcc30223 4157static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
4158{
4159 struct trace_array *tr = inode->i_private;
4160
4161 if (tracing_disabled)
4162 return -ENODEV;
4163
4164 if (trace_array_get(tr) < 0)
4165 return -ENODEV;
4166
4167 filp->private_data = inode->i_private;
4168
4169 return 0;
7b85af63
SRRH
4170}
4171
4fd27358 4172static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 4173{
6484c71c 4174 struct trace_array *tr = inode->i_private;
907f2784 4175 struct seq_file *m = file->private_data;
4acd4d00 4176 struct trace_iterator *iter;
3928a8a2 4177 int cpu;
bc0c38d1 4178
ff451961 4179 if (!(file->f_mode & FMODE_READ)) {
6484c71c 4180 trace_array_put(tr);
4acd4d00 4181 return 0;
ff451961 4182 }
4acd4d00 4183
6484c71c 4184 /* Writes do not use seq_file */
4acd4d00 4185 iter = m->private;
bc0c38d1 4186 mutex_lock(&trace_types_lock);
a695cb58 4187
3928a8a2
SR
4188 for_each_tracing_cpu(cpu) {
4189 if (iter->buffer_iter[cpu])
4190 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4191 }
4192
bc0c38d1
SR
4193 if (iter->trace && iter->trace->close)
4194 iter->trace->close(iter);
4195
debdd57f
HT
4196 if (!iter->snapshot)
4197 /* reenable tracing if it was previously enabled */
2b6080f2 4198 tracing_start_tr(tr);
f77d09a3
AL
4199
4200 __trace_array_put(tr);
4201
bc0c38d1
SR
4202 mutex_unlock(&trace_types_lock);
4203
d7350c3f 4204 mutex_destroy(&iter->mutex);
b0dfa978 4205 free_cpumask_var(iter->started);
d7350c3f 4206 kfree(iter->trace);
6d158a81 4207 kfree(iter->buffer_iter);
50e18b94 4208 seq_release_private(inode, file);
ff451961 4209
bc0c38d1
SR
4210 return 0;
4211}
4212
7b85af63
SRRH
4213static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4214{
4215 struct trace_array *tr = inode->i_private;
4216
4217 trace_array_put(tr);
bc0c38d1
SR
4218 return 0;
4219}
4220
7b85af63
SRRH
4221static int tracing_single_release_tr(struct inode *inode, struct file *file)
4222{
4223 struct trace_array *tr = inode->i_private;
4224
4225 trace_array_put(tr);
4226
4227 return single_release(inode, file);
4228}
4229
bc0c38d1
SR
4230static int tracing_open(struct inode *inode, struct file *file)
4231{
6484c71c 4232 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
4233 struct trace_iterator *iter;
4234 int ret = 0;
bc0c38d1 4235
ff451961
SRRH
4236 if (trace_array_get(tr) < 0)
4237 return -ENODEV;
4238
4acd4d00 4239 /* If this file was open for write, then erase contents */
6484c71c
ON
4240 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4241 int cpu = tracing_get_cpu(inode);
8dd33bcb
BY
4242 struct trace_buffer *trace_buf = &tr->trace_buffer;
4243
4244#ifdef CONFIG_TRACER_MAX_TRACE
4245 if (tr->current_trace->print_max)
4246 trace_buf = &tr->max_buffer;
4247#endif
6484c71c
ON
4248
4249 if (cpu == RING_BUFFER_ALL_CPUS)
8dd33bcb 4250 tracing_reset_online_cpus(trace_buf);
4acd4d00 4251 else
8dd33bcb 4252 tracing_reset(trace_buf, cpu);
4acd4d00 4253 }
bc0c38d1 4254
4acd4d00 4255 if (file->f_mode & FMODE_READ) {
6484c71c 4256 iter = __tracing_open(inode, file, false);
4acd4d00
SR
4257 if (IS_ERR(iter))
4258 ret = PTR_ERR(iter);
983f938a 4259 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4acd4d00
SR
4260 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4261 }
ff451961
SRRH
4262
4263 if (ret < 0)
4264 trace_array_put(tr);
4265
bc0c38d1
SR
4266 return ret;
4267}
4268
607e2ea1
SRRH
4269/*
4270 * Some tracers are not suitable for instance buffers.
4271 * A tracer is always available for the global array (toplevel)
4272 * or if it explicitly states that it is.
4273 */
4274static bool
4275trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4276{
4277 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4278}
4279
4280/* Find the next tracer that this trace array may use */
4281static struct tracer *
4282get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4283{
4284 while (t && !trace_ok_for_array(t, tr))
4285 t = t->next;
4286
4287 return t;
4288}
4289
e309b41d 4290static void *
bc0c38d1
SR
4291t_next(struct seq_file *m, void *v, loff_t *pos)
4292{
607e2ea1 4293 struct trace_array *tr = m->private;
f129e965 4294 struct tracer *t = v;
bc0c38d1
SR
4295
4296 (*pos)++;
4297
4298 if (t)
607e2ea1 4299 t = get_tracer_for_array(tr, t->next);
bc0c38d1 4300
bc0c38d1
SR
4301 return t;
4302}
4303
4304static void *t_start(struct seq_file *m, loff_t *pos)
4305{
607e2ea1 4306 struct trace_array *tr = m->private;
f129e965 4307 struct tracer *t;
bc0c38d1
SR
4308 loff_t l = 0;
4309
4310 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
4311
4312 t = get_tracer_for_array(tr, trace_types);
4313 for (; t && l < *pos; t = t_next(m, t, &l))
4314 ;
bc0c38d1
SR
4315
4316 return t;
4317}
4318
4319static void t_stop(struct seq_file *m, void *p)
4320{
4321 mutex_unlock(&trace_types_lock);
4322}
4323
4324static int t_show(struct seq_file *m, void *v)
4325{
4326 struct tracer *t = v;
4327
4328 if (!t)
4329 return 0;
4330
fa6f0cc7 4331 seq_puts(m, t->name);
bc0c38d1
SR
4332 if (t->next)
4333 seq_putc(m, ' ');
4334 else
4335 seq_putc(m, '\n');
4336
4337 return 0;
4338}
4339
88e9d34c 4340static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
4341 .start = t_start,
4342 .next = t_next,
4343 .stop = t_stop,
4344 .show = t_show,
bc0c38d1
SR
4345};
4346
4347static int show_traces_open(struct inode *inode, struct file *file)
4348{
607e2ea1
SRRH
4349 struct trace_array *tr = inode->i_private;
4350 struct seq_file *m;
4351 int ret;
4352
60a11774
SR
4353 if (tracing_disabled)
4354 return -ENODEV;
4355
607e2ea1
SRRH
4356 ret = seq_open(file, &show_traces_seq_ops);
4357 if (ret)
4358 return ret;
4359
4360 m = file->private_data;
4361 m->private = tr;
4362
4363 return 0;
bc0c38d1
SR
4364}
4365
4acd4d00
SR
4366static ssize_t
4367tracing_write_stub(struct file *filp, const char __user *ubuf,
4368 size_t count, loff_t *ppos)
4369{
4370 return count;
4371}
4372
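/*
 * Seeking is only meaningful when the file was opened for reading and
 * is therefore backed by a seq_file; writers simply get f_pos reset.
 */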
098c879e 4373loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 4374{
098c879e
SRRH
4375 int ret;
4376
364829b1 4377 if (file->f_mode & FMODE_READ)
098c879e 4378 ret = seq_lseek(file, offset, whence);
364829b1 4379 else
098c879e
SRRH
4380 file->f_pos = ret = 0;
4381
4382 return ret;
364829b1
SP
4383}
4384
5e2336a0 4385static const struct file_operations tracing_fops = {
4bf39a94
IM
4386 .open = tracing_open,
4387 .read = seq_read,
4acd4d00 4388 .write = tracing_write_stub,
098c879e 4389 .llseek = tracing_lseek,
4bf39a94 4390 .release = tracing_release,
bc0c38d1
SR
4391};
4392
5e2336a0 4393static const struct file_operations show_traces_fops = {
c7078de1
IM
4394 .open = show_traces_open,
4395 .read = seq_read,
4396 .release = seq_release,
b444786f 4397 .llseek = seq_lseek,
c7078de1
IM
4398};
4399
4400static ssize_t
4401tracing_cpumask_read(struct file *filp, char __user *ubuf,
4402 size_t count, loff_t *ppos)
4403{
ccfe9e42 4404 struct trace_array *tr = file_inode(filp)->i_private;
90e406f9 4405 char *mask_str;
36dfe925 4406 int len;
c7078de1 4407
90e406f9
CD
4408 len = snprintf(NULL, 0, "%*pb\n",
4409 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4410 mask_str = kmalloc(len, GFP_KERNEL);
4411 if (!mask_str)
4412 return -ENOMEM;
36dfe925 4413
90e406f9 4414 len = snprintf(mask_str, len, "%*pb\n",
1a40243b
TH
4415 cpumask_pr_args(tr->tracing_cpumask));
4416 if (len >= count) {
36dfe925
IM
4417 count = -EINVAL;
4418 goto out_err;
4419 }
90e406f9 4420 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
36dfe925
IM
4421
4422out_err:
90e406f9 4423 kfree(mask_str);
c7078de1
IM
4424
4425 return count;
4426}
4427
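/*
 * Writing a hex mask to "tracing_cpumask" (e.g. "echo 3 > tracing_cpumask"
 * to trace only CPUs 0 and 1) disables recording on CPUs leaving the mask
 * and re-enables it on CPUs being added, under tr->max_lock with
 * interrupts off.
 */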
4428static ssize_t
4429tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4430 size_t count, loff_t *ppos)
4431{
ccfe9e42 4432 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 4433 cpumask_var_t tracing_cpumask_new;
2b6080f2 4434 int err, cpu;
9e01c1b7
RR
4435
4436 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4437 return -ENOMEM;
c7078de1 4438
9e01c1b7 4439 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 4440 if (err)
36dfe925
IM
4441 goto err_unlock;
4442
a5e25883 4443 local_irq_disable();
0b9b12c1 4444 arch_spin_lock(&tr->max_lock);
ab46428c 4445 for_each_tracing_cpu(cpu) {
36dfe925
IM
4446 /*
4447 * Increase/decrease the disabled counter if we are
4448 * about to flip a bit in the cpumask:
4449 */
ccfe9e42 4450 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4451 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4452 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4453 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 4454 }
ccfe9e42 4455 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4456 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4457 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4458 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
4459 }
4460 }
0b9b12c1 4461 arch_spin_unlock(&tr->max_lock);
a5e25883 4462 local_irq_enable();
36dfe925 4463
ccfe9e42 4464 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
9e01c1b7 4465 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
4466
4467 return count;
36dfe925
IM
4468
4469err_unlock:
215368e8 4470 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
4471
4472 return err;
c7078de1
IM
4473}
4474
5e2336a0 4475static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 4476 .open = tracing_open_generic_tr,
c7078de1
IM
4477 .read = tracing_cpumask_read,
4478 .write = tracing_cpumask_write,
ccfe9e42 4479 .release = tracing_release_generic_tr,
b444786f 4480 .llseek = generic_file_llseek,
bc0c38d1
SR
4481};
4482
fdb372ed 4483static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 4484{
d8e83d26 4485 struct tracer_opt *trace_opts;
2b6080f2 4486 struct trace_array *tr = m->private;
d8e83d26 4487 u32 tracer_flags;
d8e83d26 4488 int i;
adf9f195 4489
d8e83d26 4490 mutex_lock(&trace_types_lock);
2b6080f2
SR
4491 tracer_flags = tr->current_trace->flags->val;
4492 trace_opts = tr->current_trace->flags->opts;
d8e83d26 4493
bc0c38d1 4494 for (i = 0; trace_options[i]; i++) {
983f938a 4495 if (tr->trace_flags & (1 << i))
fdb372ed 4496 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 4497 else
fdb372ed 4498 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
4499 }
4500
adf9f195
FW
4501 for (i = 0; trace_opts[i].name; i++) {
4502 if (tracer_flags & trace_opts[i].bit)
fdb372ed 4503 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 4504 else
fdb372ed 4505 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 4506 }
d8e83d26 4507 mutex_unlock(&trace_types_lock);
adf9f195 4508
fdb372ed 4509 return 0;
bc0c38d1 4510}
bc0c38d1 4511
8c1a49ae 4512static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
4513 struct tracer_flags *tracer_flags,
4514 struct tracer_opt *opts, int neg)
4515{
d39cdd20 4516 struct tracer *trace = tracer_flags->trace;
8d18eaaf 4517 int ret;
bc0c38d1 4518
8c1a49ae 4519 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
4520 if (ret)
4521 return ret;
4522
4523 if (neg)
4524 tracer_flags->val &= ~opts->bit;
4525 else
4526 tracer_flags->val |= opts->bit;
4527 return 0;
bc0c38d1
SR
4528}
4529
adf9f195 4530/* Try to assign a tracer specific option */
8c1a49ae 4531static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 4532{
8c1a49ae 4533 struct tracer *trace = tr->current_trace;
7770841e 4534 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 4535 struct tracer_opt *opts = NULL;
8d18eaaf 4536 int i;
adf9f195 4537
7770841e
Z
4538 for (i = 0; tracer_flags->opts[i].name; i++) {
4539 opts = &tracer_flags->opts[i];
adf9f195 4540
8d18eaaf 4541 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 4542 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 4543 }
adf9f195 4544
8d18eaaf 4545 return -EINVAL;
adf9f195
FW
4546}
4547
613f04a0
SRRH
4548/* Some tracers require overwrite to stay enabled */
4549int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4550{
4551 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4552 return -1;
4553
4554 return 0;
4555}
4556
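/*
 * Central point for flipping a trace option bit: the current tracer may
 * reject the change, and side effects of options such as record-cmd,
 * record-tgid, event-fork, overwrite and printk are handled here.
 */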
2b6080f2 4557int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
4558{
4559 /* do nothing if flag is already set */
983f938a 4560 if (!!(tr->trace_flags & mask) == !!enabled)
613f04a0
SRRH
4561 return 0;
4562
4563 /* Give the tracer a chance to approve the change */
2b6080f2 4564 if (tr->current_trace->flag_changed)
bf6065b5 4565 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 4566 return -EINVAL;
af4617bd
SR
4567
4568 if (enabled)
983f938a 4569 tr->trace_flags |= mask;
af4617bd 4570 else
983f938a 4571 tr->trace_flags &= ~mask;
e870e9a1
LZ
4572
4573 if (mask == TRACE_ITER_RECORD_CMD)
4574 trace_event_enable_cmd_record(enabled);
750912fa 4575
d914ba37
JF
4576 if (mask == TRACE_ITER_RECORD_TGID) {
4577 if (!tgid_map)
6396bb22
KC
4578 tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4579 sizeof(*tgid_map),
d914ba37
JF
4580 GFP_KERNEL);
4581 if (!tgid_map) {
4582 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4583 return -ENOMEM;
4584 }
4585
4586 trace_event_enable_tgid_record(enabled);
4587 }
4588
c37775d5
SR
4589 if (mask == TRACE_ITER_EVENT_FORK)
4590 trace_event_follow_fork(tr, enabled);
4591
1e10486f
NK
4592 if (mask == TRACE_ITER_FUNC_FORK)
4593 ftrace_pid_follow_fork(tr, enabled);
4594
80902822 4595 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 4596 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 4597#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 4598 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
4599#endif
4600 }
81698831 4601
b9f9108c 4602 if (mask == TRACE_ITER_PRINTK) {
81698831 4603 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
4604 trace_printk_control(enabled);
4605 }
613f04a0
SRRH
4606
4607 return 0;
af4617bd
SR
4608}
4609
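/*
 * Parse one option token written to "trace_options": a leading "no"
 * negates it, the name is matched against the core trace_options[] list,
 * and anything unknown is tried as a tracer-specific flag.  E.g.
 * "echo noprint-parent > trace_options" clears the print-parent bit.
 */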
2b6080f2 4610static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 4611{
8d18eaaf 4612 char *cmp;
bc0c38d1 4613 int neg = 0;
591a033d 4614 int ret;
a4d1e688 4615 size_t orig_len = strlen(option);
3d739c1f 4616 int len;
bc0c38d1 4617
7bcfaf54 4618 cmp = strstrip(option);
bc0c38d1 4619
3d739c1f
SRV
4620 len = str_has_prefix(cmp, "no");
4621 if (len)
bc0c38d1 4622 neg = 1;
3d739c1f
SRV
4623
4624 cmp += len;
bc0c38d1 4625
69d34da2
SRRH
4626 mutex_lock(&trace_types_lock);
4627
591a033d 4628 ret = match_string(trace_options, -1, cmp);
adf9f195 4629 /* If no option could be set, test the specific tracer options */
591a033d 4630 if (ret < 0)
8c1a49ae 4631 ret = set_tracer_option(tr, cmp, neg);
591a033d
YX
4632 else
4633 ret = set_tracer_flag(tr, 1 << ret, !neg);
69d34da2
SRRH
4634
4635 mutex_unlock(&trace_types_lock);
bc0c38d1 4636
a4d1e688
JW
4637 /*
4638 * If the first trailing whitespace is replaced with '\0' by strstrip,
4639 * turn it back into a space.
4640 */
4641 if (orig_len > strlen(option))
4642 option[strlen(option)] = ' ';
4643
7bcfaf54
SR
4644 return ret;
4645}
4646
a4d1e688
JW
4647static void __init apply_trace_boot_options(void)
4648{
4649 char *buf = trace_boot_options_buf;
4650 char *option;
4651
4652 while (true) {
4653 option = strsep(&buf, ",");
4654
4655 if (!option)
4656 break;
a4d1e688 4657
43ed3843
SRRH
4658 if (*option)
4659 trace_set_options(&global_trace, option);
a4d1e688
JW
4660
4661 /* Put back the comma to allow this to be called again */
4662 if (buf)
4663 *(buf - 1) = ',';
4664 }
4665}
4666
7bcfaf54
SR
4667static ssize_t
4668tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4669 size_t cnt, loff_t *ppos)
4670{
2b6080f2
SR
4671 struct seq_file *m = filp->private_data;
4672 struct trace_array *tr = m->private;
7bcfaf54 4673 char buf[64];
613f04a0 4674 int ret;
7bcfaf54
SR
4675
4676 if (cnt >= sizeof(buf))
4677 return -EINVAL;
4678
4afe6495 4679 if (copy_from_user(buf, ubuf, cnt))
7bcfaf54
SR
4680 return -EFAULT;
4681
a8dd2176
SR
4682 buf[cnt] = 0;
4683
2b6080f2 4684 ret = trace_set_options(tr, buf);
613f04a0
SRRH
4685 if (ret < 0)
4686 return ret;
7bcfaf54 4687
cf8517cf 4688 *ppos += cnt;
bc0c38d1
SR
4689
4690 return cnt;
4691}
4692
fdb372ed
LZ
4693static int tracing_trace_options_open(struct inode *inode, struct file *file)
4694{
7b85af63 4695 struct trace_array *tr = inode->i_private;
f77d09a3 4696 int ret;
7b85af63 4697
fdb372ed
LZ
4698 if (tracing_disabled)
4699 return -ENODEV;
2b6080f2 4700
7b85af63
SRRH
4701 if (trace_array_get(tr) < 0)
4702 return -ENODEV;
4703
f77d09a3
AL
4704 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4705 if (ret < 0)
4706 trace_array_put(tr);
4707
4708 return ret;
fdb372ed
LZ
4709}
4710
5e2336a0 4711static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
4712 .open = tracing_trace_options_open,
4713 .read = seq_read,
4714 .llseek = seq_lseek,
7b85af63 4715 .release = tracing_single_release_tr,
ee6bce52 4716 .write = tracing_trace_options_write,
bc0c38d1
SR
4717};
4718
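/*
 * Static text served by the "README" file: a mini-HOWTO for the tracefs
 * interface, assembled at build time from whichever features are
 * configured in.
 */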
7bd2f24c
IM
4719static const char readme_msg[] =
4720 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
4721 "# echo 0 > tracing_on : quick way to disable tracing\n"
4722 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4723 " Important files:\n"
4724 " trace\t\t\t- The static contents of the buffer\n"
4725 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4726 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4727 " current_tracer\t- function and latency tracers\n"
4728 " available_tracers\t- list of configured tracers for current_tracer\n"
a8d65579 4729 " error_log\t- error log for failed commands (that support it)\n"
22f45649
SRRH
4730 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4731 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
 4732 " trace_clock\t\t- change the clock used to order events\n"
4733 " local: Per cpu clock but may not be synced across CPUs\n"
4734 " global: Synced across CPUs but slows tracing down.\n"
4735 " counter: Not a clock, but just an increment\n"
4736 " uptime: Jiffy counter from time of boot\n"
4737 " perf: Same clock that perf events use\n"
4738#ifdef CONFIG_X86_64
4739 " x86-tsc: TSC cycle counter\n"
4740#endif
2c1ea60b
TZ
 4741 "\n timestamp_mode\t- view the mode used to timestamp events\n"
4742 " delta: Delta difference against a buffer-wide timestamp\n"
4743 " absolute: Absolute (standalone) timestamp\n"
22f45649 4744 "\n trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
fa32e855 4745 "\n trace_marker_raw\t\t- Writes into this file are inserted as binary data into the kernel buffer\n"
22f45649
SRRH
4746 " tracing_cpumask\t- Limit which CPUs to trace\n"
4747 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4748 "\t\t\t Remove sub-buffer with rmdir\n"
4749 " trace_options\t\t- Set format or modify how tracing happens\n"
b9416997 4750 "\t\t\t Disable an option by prefixing 'no' to the\n"
71485c45 4751 "\t\t\t option name\n"
939c7a4f 4752 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
4753#ifdef CONFIG_DYNAMIC_FTRACE
4754 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
4755 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4756 "\t\t\t functions\n"
60f1d5e3 4757 "\t accepts: func_full_name or glob-matching-pattern\n"
71485c45
SRRH
4758 "\t modules: Can select a group via module\n"
4759 "\t Format: :mod:<module-name>\n"
4760 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4761 "\t triggers: a command to perform when function is hit\n"
4762 "\t Format: <function>:<trigger>[:count]\n"
4763 "\t trigger: traceon, traceoff\n"
4764 "\t\t enable_event:<system>:<event>\n"
4765 "\t\t disable_event:<system>:<event>\n"
22f45649 4766#ifdef CONFIG_STACKTRACE
71485c45 4767 "\t\t stacktrace\n"
22f45649
SRRH
4768#endif
4769#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4770 "\t\t snapshot\n"
22f45649 4771#endif
17a280ea
SRRH
4772 "\t\t dump\n"
4773 "\t\t cpudump\n"
71485c45
SRRH
4774 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4775 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4776 "\t The first one will disable tracing every time do_fault is hit\n"
4777 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
 4778 "\t The first time do_trap is hit and it disables tracing, the\n"
4779 "\t counter will decrement to 2. If tracing is already disabled,\n"
4780 "\t the counter will not decrement. It only decrements when the\n"
4781 "\t trigger did work\n"
4782 "\t To remove trigger without count:\n"
4783 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4784 "\t To remove trigger with a count:\n"
4785 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 4786 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
4787 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4788 "\t modules: Can select a group via module command :mod:\n"
4789 "\t Does not accept triggers\n"
22f45649
SRRH
4790#endif /* CONFIG_DYNAMIC_FTRACE */
4791#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
4792 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4793 "\t\t (function)\n"
22f45649
SRRH
4794#endif
4795#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4796 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 4797 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
4798 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4799#endif
4800#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
4801 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4802 "\t\t\t snapshot buffer. Read the contents for more\n"
4803 "\t\t\t information\n"
22f45649 4804#endif
991821c8 4805#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
4806 " stack_trace\t\t- Shows the max stack trace when active\n"
4807 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
4808 "\t\t\t Write into this file to reset the max size (trigger a\n"
4809 "\t\t\t new trace)\n"
22f45649 4810#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
4811 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4812 "\t\t\t traces\n"
22f45649 4813#endif
991821c8 4814#endif /* CONFIG_STACK_TRACER */
5448d44c
MH
4815#ifdef CONFIG_DYNAMIC_EVENTS
4816 " dynamic_events\t\t- Add/remove/show the generic dynamic events\n"
4817 "\t\t\t Write into this file to define/undefine new trace events.\n"
4818#endif
6b0b7551 4819#ifdef CONFIG_KPROBE_EVENTS
86425625
MH
4820 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4821 "\t\t\t Write into this file to define/undefine new trace events.\n"
4822#endif
6b0b7551 4823#ifdef CONFIG_UPROBE_EVENTS
86425625
MH
4824 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4825 "\t\t\t Write into this file to define/undefine new trace events.\n"
4826#endif
6b0b7551 4827#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
86425625 4828 "\t accepts: event-definitions (one definition per line)\n"
c3ca46ef
MH
4829 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4830 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
7bbab38d
MH
4831#ifdef CONFIG_HIST_TRIGGERS
4832 "\t s:[synthetic/]<event> <field> [<field>]\n"
4833#endif
86425625 4834 "\t -:[<group>/]<event>\n"
6b0b7551 4835#ifdef CONFIG_KPROBE_EVENTS
86425625 4836 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
35b6f55a 4837 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
86425625 4838#endif
6b0b7551 4839#ifdef CONFIG_UPROBE_EVENTS
1cc33161 4840 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
86425625
MH
4841#endif
4842 "\t args: <name>=fetcharg[:type]\n"
4843 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
a1303af5
MH
4844#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
4845 "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n"
4846#else
86425625 4847 "\t $stack<index>, $stack, $retval, $comm\n"
a1303af5 4848#endif
60c2e0ce 4849 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
40b53b77
MH
4850 "\t b<bit-width>@<bit-offset>/<container-size>,\n"
4851 "\t <type>\\[<array-size>\\]\n"
7bbab38d
MH
4852#ifdef CONFIG_HIST_TRIGGERS
4853 "\t field: <stype> <name>;\n"
4854 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
4855 "\t [unsigned] char/int/long\n"
4856#endif
86425625 4857#endif
26f25564
TZ
4858 " events/\t\t- Directory containing all trace event subsystems:\n"
4859 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4860 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
4861 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4862 "\t\t\t events\n"
26f25564 4863 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
4864 " events/<system>/<event>/\t- Directory containing control files for\n"
4865 "\t\t\t <event>:\n"
26f25564
TZ
4866 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4867 " filter\t\t- If set, only events passing filter are traced\n"
4868 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
4869 "\t Format: <trigger>[:count][if <filter>]\n"
4870 "\t trigger: traceon, traceoff\n"
4871 "\t enable_event:<system>:<event>\n"
4872 "\t disable_event:<system>:<event>\n"
d0bad49b
TZ
4873#ifdef CONFIG_HIST_TRIGGERS
4874 "\t enable_hist:<system>:<event>\n"
4875 "\t disable_hist:<system>:<event>\n"
4876#endif
26f25564 4877#ifdef CONFIG_STACKTRACE
71485c45 4878 "\t\t stacktrace\n"
26f25564
TZ
4879#endif
4880#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4881 "\t\t snapshot\n"
7ef224d1
TZ
4882#endif
4883#ifdef CONFIG_HIST_TRIGGERS
4884 "\t\t hist (see below)\n"
26f25564 4885#endif
71485c45
SRRH
4886 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4887 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4888 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4889 "\t events/block/block_unplug/trigger\n"
4890 "\t The first disables tracing every time block_unplug is hit.\n"
4891 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4892 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4893 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4894 "\t Like function triggers, the counter is only decremented if it\n"
4895 "\t enabled or disabled tracing.\n"
4896 "\t To remove a trigger without a count:\n"
4897 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4898 "\t To remove a trigger with a count:\n"
4899 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4900 "\t Filters can be ignored when removing a trigger.\n"
7ef224d1
TZ
4901#ifdef CONFIG_HIST_TRIGGERS
4902 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
76a3b0c8 4903 "\t Format: hist:keys=<field1[,field2,...]>\n"
f2606835 4904 "\t [:values=<field1[,field2,...]>]\n"
e62347d2 4905 "\t [:sort=<field1[,field2,...]>]\n"
7ef224d1 4906 "\t [:size=#entries]\n"
e86ae9ba 4907 "\t [:pause][:continue][:clear]\n"
5463bfda 4908 "\t [:name=histname1]\n"
c3e49506 4909 "\t [:<handler>.<action>]\n"
7ef224d1
TZ
4910 "\t [if <filter>]\n\n"
4911 "\t When a matching event is hit, an entry is added to a hash\n"
f2606835
TZ
4912 "\t table using the key(s) and value(s) named, and the value of a\n"
4913 "\t sum called 'hitcount' is incremented. Keys and values\n"
4914 "\t correspond to fields in the event's format description. Keys\n"
69a0200c
TZ
4915 "\t can be any field, or the special string 'stacktrace'.\n"
4916 "\t Compound keys consisting of up to two fields can be specified\n"
4917 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4918 "\t fields. Sort keys consisting of up to two fields can be\n"
4919 "\t specified using the 'sort' keyword. The sort direction can\n"
4920 "\t be modified by appending '.descending' or '.ascending' to a\n"
4921 "\t sort field. The 'size' parameter can be used to specify more\n"
5463bfda
TZ
4922 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4923 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4924 "\t its histogram data will be shared with other triggers of the\n"
4925 "\t same name, and trigger hits will update this common data.\n\n"
7ef224d1 4926 "\t Reading the 'hist' file for the event will dump the hash\n"
52a7f16d
TZ
4927 "\t table in its entirety to stdout. If there are multiple hist\n"
4928 "\t triggers attached to an event, there will be a table for each\n"
5463bfda
TZ
4929 "\t trigger in the output. The table displayed for a named\n"
4930 "\t trigger will be the same as any other instance having the\n"
4931 "\t same name. The default format used to display a given field\n"
4932 "\t can be modified by appending any of the following modifiers\n"
4933 "\t to the field name, as applicable:\n\n"
c6afad49
TZ
4934 "\t .hex display a number as a hex value\n"
4935 "\t .sym display an address as a symbol\n"
6b4827ad 4936 "\t .sym-offset display an address as a symbol and offset\n"
31696198 4937 "\t .execname display a common_pid as a program name\n"
860f9f6b
TZ
4938 "\t .syscall display a syscall id as a syscall name\n"
4939 "\t .log2 display log2 value rather than raw number\n"
4940 "\t .usecs display a common_timestamp in microseconds\n\n"
83e99914
TZ
4941 "\t The 'pause' parameter can be used to pause an existing hist\n"
4942 "\t trigger or to start a hist trigger but not log any events\n"
4943 "\t until told to do so. 'continue' can be used to start or\n"
4944 "\t restart a paused hist trigger.\n\n"
e86ae9ba
TZ
4945 "\t The 'clear' parameter will clear the contents of a running\n"
4946 "\t hist trigger and leave its current paused/active state\n"
4947 "\t unchanged.\n\n"
d0bad49b
TZ
4948 "\t The enable_hist and disable_hist triggers can be used to\n"
4949 "\t have one event conditionally start and stop another event's\n"
9e5a36a3 4950 "\t already-attached hist trigger. The syntax is analogous to\n"
c3e49506
TZ
4951 "\t the enable_event and disable_event triggers.\n\n"
4952 "\t Hist trigger handlers and actions are executed whenever a\n"
4953 "\t a histogram entry is added or updated. They take the form:\n\n"
4954 "\t <handler>.<action>\n\n"
4955 "\t The available handlers are:\n\n"
4956 "\t onmatch(matching.event) - invoke on addition or update\n"
dff81f55
TZ
4957 "\t onmax(var) - invoke if var exceeds current max\n"
4958 "\t onchange(var) - invoke action if var changes\n\n"
c3e49506 4959 "\t The available actions are:\n\n"
e91eefd7 4960 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
c3e49506 4961 "\t save(field,...) - save current event fields\n"
a3785b7e
TZ
4962#ifdef CONFIG_TRACER_SNAPSHOT
4963 "\t snapshot() - snapshot the trace buffer\n"
4964#endif
7ef224d1 4965#endif
7bd2f24c
IM
4966;
4967
4968static ssize_t
4969tracing_readme_read(struct file *filp, char __user *ubuf,
4970 size_t cnt, loff_t *ppos)
4971{
4972 return simple_read_from_buffer(ubuf, cnt, ppos,
4973 readme_msg, strlen(readme_msg));
4974}
4975
5e2336a0 4976static const struct file_operations tracing_readme_fops = {
c7078de1
IM
4977 .open = tracing_open_generic,
4978 .read = tracing_readme_read,
b444786f 4979 .llseek = generic_file_llseek,
7bd2f24c
IM
4980};
4981
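/*
 * seq_file iterator behind "saved_tgids": walk the pid-indexed tgid_map
 * and emit one "pid tgid" pair per line for every pid with a recorded
 * tgid.
 */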
99c621d7
MS
4982static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4983{
4984 int *ptr = v;
4985
4986 if (*pos || m->count)
4987 ptr++;
4988
4989 (*pos)++;
4990
4991 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4992 if (trace_find_tgid(*ptr))
4993 return ptr;
4994 }
4995
4996 return NULL;
4997}
4998
4999static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5000{
5001 void *v;
5002 loff_t l = 0;
5003
5004 if (!tgid_map)
5005 return NULL;
5006
5007 v = &tgid_map[0];
5008 while (l <= *pos) {
5009 v = saved_tgids_next(m, v, &l);
5010 if (!v)
5011 return NULL;
5012 }
5013
5014 return v;
5015}
5016
5017static void saved_tgids_stop(struct seq_file *m, void *v)
5018{
5019}
5020
5021static int saved_tgids_show(struct seq_file *m, void *v)
5022{
5023 int pid = (int *)v - tgid_map;
5024
5025 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5026 return 0;
5027}
5028
5029static const struct seq_operations tracing_saved_tgids_seq_ops = {
5030 .start = saved_tgids_start,
5031 .stop = saved_tgids_stop,
5032 .next = saved_tgids_next,
5033 .show = saved_tgids_show,
5034};
5035
5036static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5037{
5038 if (tracing_disabled)
5039 return -ENODEV;
5040
5041 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5042}
5043
5044
5045static const struct file_operations tracing_saved_tgids_fops = {
5046 .open = tracing_saved_tgids_open,
5047 .read = seq_read,
5048 .llseek = seq_lseek,
5049 .release = seq_release,
5050};
5051
42584c81
YY
5052static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5053{
5054 unsigned int *ptr = v;
69abe6a5 5055
42584c81
YY
5056 if (*pos || m->count)
5057 ptr++;
69abe6a5 5058
42584c81 5059 (*pos)++;
69abe6a5 5060
939c7a4f
YY
5061 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5062 ptr++) {
42584c81
YY
5063 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5064 continue;
69abe6a5 5065
42584c81
YY
5066 return ptr;
5067 }
69abe6a5 5068
42584c81
YY
5069 return NULL;
5070}
5071
5072static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5073{
5074 void *v;
5075 loff_t l = 0;
69abe6a5 5076
4c27e756
SRRH
5077 preempt_disable();
5078 arch_spin_lock(&trace_cmdline_lock);
5079
939c7a4f 5080 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
5081 while (l <= *pos) {
5082 v = saved_cmdlines_next(m, v, &l);
5083 if (!v)
5084 return NULL;
69abe6a5
AP
5085 }
5086
42584c81
YY
5087 return v;
5088}
5089
5090static void saved_cmdlines_stop(struct seq_file *m, void *v)
5091{
4c27e756
SRRH
5092 arch_spin_unlock(&trace_cmdline_lock);
5093 preempt_enable();
42584c81 5094}
69abe6a5 5095
42584c81
YY
5096static int saved_cmdlines_show(struct seq_file *m, void *v)
5097{
5098 char buf[TASK_COMM_LEN];
5099 unsigned int *pid = v;
69abe6a5 5100
4c27e756 5101 __trace_find_cmdline(*pid, buf);
42584c81
YY
5102 seq_printf(m, "%d %s\n", *pid, buf);
5103 return 0;
5104}
5105
5106static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5107 .start = saved_cmdlines_start,
5108 .next = saved_cmdlines_next,
5109 .stop = saved_cmdlines_stop,
5110 .show = saved_cmdlines_show,
5111};
5112
5113static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5114{
5115 if (tracing_disabled)
5116 return -ENODEV;
5117
5118 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
5119}
5120
5121static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
5122 .open = tracing_saved_cmdlines_open,
5123 .read = seq_read,
5124 .llseek = seq_lseek,
5125 .release = seq_release,
69abe6a5
AP
5126};
5127
939c7a4f
YY
5128static ssize_t
5129tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5130 size_t cnt, loff_t *ppos)
5131{
5132 char buf[64];
5133 int r;
5134
5135 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 5136 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
5137 arch_spin_unlock(&trace_cmdline_lock);
5138
5139 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5140}
5141
5142static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5143{
5144 kfree(s->saved_cmdlines);
5145 kfree(s->map_cmdline_to_pid);
5146 kfree(s);
5147}
5148
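/*
 * Resize the saved-cmdlines buffer (written via "saved_cmdlines_size"):
 * allocate a buffer of the requested size, swap it in under
 * trace_cmdline_lock, then free the old one.
 */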
5149static int tracing_resize_saved_cmdlines(unsigned int val)
5150{
5151 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5152
a6af8fbf 5153 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
5154 if (!s)
5155 return -ENOMEM;
5156
5157 if (allocate_cmdlines_buffer(val, s) < 0) {
5158 kfree(s);
5159 return -ENOMEM;
5160 }
5161
5162 arch_spin_lock(&trace_cmdline_lock);
5163 savedcmd_temp = savedcmd;
5164 savedcmd = s;
5165 arch_spin_unlock(&trace_cmdline_lock);
5166 free_saved_cmdlines_buffer(savedcmd_temp);
5167
5168 return 0;
5169}
5170
5171static ssize_t
5172tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5173 size_t cnt, loff_t *ppos)
5174{
5175 unsigned long val;
5176 int ret;
5177
5178 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5179 if (ret)
5180 return ret;
5181
5182 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
5183 if (!val || val > PID_MAX_DEFAULT)
5184 return -EINVAL;
5185
5186 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5187 if (ret < 0)
5188 return ret;
5189
5190 *ppos += cnt;
5191
5192 return cnt;
5193}
5194
5195static const struct file_operations tracing_saved_cmdlines_size_fops = {
5196 .open = tracing_open_generic,
5197 .read = tracing_saved_cmdlines_size_read,
5198 .write = tracing_saved_cmdlines_size_write,
5199};
5200
681bec03 5201#ifdef CONFIG_TRACE_EVAL_MAP_FILE
23bf8cb8 5202static union trace_eval_map_item *
f57a4143 5203update_eval_map(union trace_eval_map_item *ptr)
9828413d 5204{
00f4b652 5205 if (!ptr->map.eval_string) {
9828413d
SRRH
5206 if (ptr->tail.next) {
5207 ptr = ptr->tail.next;
5208 /* Set ptr to the next real item (skip head) */
5209 ptr++;
5210 } else
5211 return NULL;
5212 }
5213 return ptr;
5214}
5215
f57a4143 5216static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
9828413d 5217{
23bf8cb8 5218 union trace_eval_map_item *ptr = v;
9828413d
SRRH
5219
5220 /*
5221 * Paranoid! If ptr points to end, we don't want to increment past it.
5222 * This really should never happen.
5223 */
f57a4143 5224 ptr = update_eval_map(ptr);
9828413d
SRRH
5225 if (WARN_ON_ONCE(!ptr))
5226 return NULL;
5227
5228 ptr++;
5229
5230 (*pos)++;
5231
f57a4143 5232 ptr = update_eval_map(ptr);
9828413d
SRRH
5233
5234 return ptr;
5235}
5236
f57a4143 5237static void *eval_map_start(struct seq_file *m, loff_t *pos)
9828413d 5238{
23bf8cb8 5239 union trace_eval_map_item *v;
9828413d
SRRH
5240 loff_t l = 0;
5241
1793ed93 5242 mutex_lock(&trace_eval_mutex);
9828413d 5243
23bf8cb8 5244 v = trace_eval_maps;
9828413d
SRRH
5245 if (v)
5246 v++;
5247
5248 while (v && l < *pos) {
f57a4143 5249 v = eval_map_next(m, v, &l);
9828413d
SRRH
5250 }
5251
5252 return v;
5253}
5254
f57a4143 5255static void eval_map_stop(struct seq_file *m, void *v)
9828413d 5256{
1793ed93 5257 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
5258}
5259
f57a4143 5260static int eval_map_show(struct seq_file *m, void *v)
9828413d 5261{
23bf8cb8 5262 union trace_eval_map_item *ptr = v;
9828413d
SRRH
5263
5264 seq_printf(m, "%s %ld (%s)\n",
00f4b652 5265 ptr->map.eval_string, ptr->map.eval_value,
9828413d
SRRH
5266 ptr->map.system);
5267
5268 return 0;
5269}
5270
f57a4143
JL
5271static const struct seq_operations tracing_eval_map_seq_ops = {
5272 .start = eval_map_start,
5273 .next = eval_map_next,
5274 .stop = eval_map_stop,
5275 .show = eval_map_show,
9828413d
SRRH
5276};
5277
f57a4143 5278static int tracing_eval_map_open(struct inode *inode, struct file *filp)
9828413d
SRRH
5279{
5280 if (tracing_disabled)
5281 return -ENODEV;
5282
f57a4143 5283 return seq_open(filp, &tracing_eval_map_seq_ops);
9828413d
SRRH
5284}
5285
f57a4143
JL
5286static const struct file_operations tracing_eval_map_fops = {
5287 .open = tracing_eval_map_open,
9828413d
SRRH
5288 .read = seq_read,
5289 .llseek = seq_lseek,
5290 .release = seq_release,
5291};
5292
23bf8cb8 5293static inline union trace_eval_map_item *
5f60b351 5294trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
9828413d
SRRH
5295{
5296 /* Return tail of array given the head */
5297 return ptr + ptr->head.length + 1;
5298}
5299
5300static void
f57a4143 5301trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
9828413d
SRRH
5302 int len)
5303{
00f4b652
JL
5304 struct trace_eval_map **stop;
5305 struct trace_eval_map **map;
23bf8cb8
JL
5306 union trace_eval_map_item *map_array;
5307 union trace_eval_map_item *ptr;
9828413d
SRRH
5308
5309 stop = start + len;
5310
5311 /*
23bf8cb8 5312 * The trace_eval_maps contains the map plus a head and tail item,
9828413d
SRRH
5313 * where the head holds the module and length of array, and the
5314 * tail holds a pointer to the next list.
5315 */
6da2ec56 5316 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
9828413d 5317 if (!map_array) {
f57a4143 5318 pr_warn("Unable to allocate trace eval mapping\n");
9828413d
SRRH
5319 return;
5320 }
5321
1793ed93 5322 mutex_lock(&trace_eval_mutex);
9828413d 5323
23bf8cb8
JL
5324 if (!trace_eval_maps)
5325 trace_eval_maps = map_array;
9828413d 5326 else {
23bf8cb8 5327 ptr = trace_eval_maps;
9828413d 5328 for (;;) {
5f60b351 5329 ptr = trace_eval_jmp_to_tail(ptr);
9828413d
SRRH
5330 if (!ptr->tail.next)
5331 break;
5332 ptr = ptr->tail.next;
5333
5334 }
5335 ptr->tail.next = map_array;
5336 }
5337 map_array->head.mod = mod;
5338 map_array->head.length = len;
5339 map_array++;
5340
5341 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5342 map_array->map = **map;
5343 map_array++;
5344 }
5345 memset(map_array, 0, sizeof(*map_array));
5346
1793ed93 5347 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
5348}
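/*
 * For illustration, the map_array built above for a module with three
 * eval maps is laid out like this (the head and tail are the two extra
 * items allocated beyond len):
 *
 *	map_array[0]	head: { .mod = mod, .length = 3 }
 *	map_array[1]	map:  first trace_eval_map copied from *start
 *	map_array[2]	map:  second trace_eval_map
 *	map_array[3]	map:  third trace_eval_map
 *	map_array[4]	tail: zeroed; tail.next later points to the next
 *			      module's array
 *
 * trace_eval_jmp_to_tail() jumps from the head to the tail by adding
 * head.length + 1 to the head pointer.
 */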
5349
f57a4143 5350static void trace_create_eval_file(struct dentry *d_tracer)
9828413d 5351{
681bec03 5352 trace_create_file("eval_map", 0444, d_tracer,
f57a4143 5353 NULL, &tracing_eval_map_fops);
9828413d
SRRH
5354}
5355
681bec03 5356#else /* CONFIG_TRACE_EVAL_MAP_FILE */
f57a4143
JL
5357static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5358static inline void trace_insert_eval_map_file(struct module *mod,
00f4b652 5359 struct trace_eval_map **start, int len) { }
681bec03 5360#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 5361
f57a4143 5362static void trace_insert_eval_map(struct module *mod,
00f4b652 5363 struct trace_eval_map **start, int len)
0c564a53 5364{
00f4b652 5365 struct trace_eval_map **map;
0c564a53
SRRH
5366
5367 if (len <= 0)
5368 return;
5369
5370 map = start;
5371
f57a4143 5372 trace_event_eval_update(map, len);
9828413d 5373
f57a4143 5374 trace_insert_eval_map_file(mod, start, len);
0c564a53
SRRH
5375}
5376
bc0c38d1
SR
5377static ssize_t
5378tracing_set_trace_read(struct file *filp, char __user *ubuf,
5379 size_t cnt, loff_t *ppos)
5380{
2b6080f2 5381 struct trace_array *tr = filp->private_data;
ee6c2c1b 5382 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
5383 int r;
5384
5385 mutex_lock(&trace_types_lock);
2b6080f2 5386 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
5387 mutex_unlock(&trace_types_lock);
5388
4bf39a94 5389 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
5390}
5391
b6f11df2
ACM
5392int tracer_init(struct tracer *t, struct trace_array *tr)
5393{
12883efb 5394 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
5395 return t->init(tr);
5396}
5397
12883efb 5398static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
5399{
5400 int cpu;
737223fb 5401
438ced17 5402 for_each_tracing_cpu(cpu)
12883efb 5403 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
5404}
5405
12883efb 5406#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 5407/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
5408static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5409 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
5410{
5411 int cpu, ret = 0;
5412
5413 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5414 for_each_tracing_cpu(cpu) {
12883efb
SRRH
5415 ret = ring_buffer_resize(trace_buf->buffer,
5416 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
5417 if (ret < 0)
5418 break;
12883efb
SRRH
5419 per_cpu_ptr(trace_buf->data, cpu)->entries =
5420 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
5421 }
5422 } else {
12883efb
SRRH
5423 ret = ring_buffer_resize(trace_buf->buffer,
5424 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 5425 if (ret == 0)
12883efb
SRRH
5426 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5427 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
5428 }
5429
5430 return ret;
5431}
12883efb 5432#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 5433
2b6080f2
SR
5434static int __tracing_resize_ring_buffer(struct trace_array *tr,
5435 unsigned long size, int cpu)
73c5162a
SR
5436{
5437 int ret;
5438
5439 /*
5440 * If kernel or user changes the size of the ring buffer
a123c52b
SR
5441 * we use the size that was given, and we can forget about
5442 * expanding it later.
73c5162a 5443 */
55034cd6 5444 ring_buffer_expanded = true;
73c5162a 5445
b382ede6 5446 /* May be called before buffers are initialized */
12883efb 5447 if (!tr->trace_buffer.buffer)
b382ede6
SR
5448 return 0;
5449
12883efb 5450 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
5451 if (ret < 0)
5452 return ret;
5453
12883efb 5454#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
5455 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5456 !tr->current_trace->use_max_tr)
ef710e10
KM
5457 goto out;
5458
12883efb 5459 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 5460 if (ret < 0) {
12883efb
SRRH
5461 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5462 &tr->trace_buffer, cpu);
73c5162a 5463 if (r < 0) {
a123c52b
SR
5464 /*
5465 * AARGH! We are left with different
5466 * size max buffer!!!!
5467 * The max buffer is our "snapshot" buffer.
5468 * When a tracer needs a snapshot (one of the
5469 * latency tracers), it swaps the max buffer
 5470 * with the saved snapshot. We succeeded in
 5471 * updating the size of the main buffer, but failed to
5472 * update the size of the max buffer. But when we tried
5473 * to reset the main buffer to the original size, we
5474 * failed there too. This is very unlikely to
5475 * happen, but if it does, warn and kill all
5476 * tracing.
5477 */
73c5162a
SR
5478 WARN_ON(1);
5479 tracing_disabled = 1;
5480 }
5481 return ret;
5482 }
5483
438ced17 5484 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5485 set_buffer_entries(&tr->max_buffer, size);
438ced17 5486 else
12883efb 5487 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 5488
ef710e10 5489 out:
12883efb
SRRH
5490#endif /* CONFIG_TRACER_MAX_TRACE */
5491
438ced17 5492 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5493 set_buffer_entries(&tr->trace_buffer, size);
438ced17 5494 else
12883efb 5495 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
5496
5497 return ret;
5498}
5499
2b6080f2
SR
5500static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5501 unsigned long size, int cpu_id)
4f271a2a 5502{
83f40318 5503 int ret = size;
4f271a2a
VN
5504
5505 mutex_lock(&trace_types_lock);
5506
438ced17
VN
5507 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5508 /* make sure, this cpu is enabled in the mask */
5509 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5510 ret = -EINVAL;
5511 goto out;
5512 }
5513 }
4f271a2a 5514
2b6080f2 5515 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
5516 if (ret < 0)
5517 ret = -ENOMEM;
5518
438ced17 5519out:
4f271a2a
VN
5520 mutex_unlock(&trace_types_lock);
5521
5522 return ret;
5523}
5524
ef710e10 5525
1852fcce
SR
5526/**
5527 * tracing_update_buffers - used by tracing facility to expand ring buffers
5528 *
5529 * To save on memory when the tracing is never used on a system with it
5530 * configured in. The ring buffers are set to a minimum size. But once
5531 * a user starts to use the tracing facility, then they need to grow
5532 * to their default size.
5533 *
5534 * This function is to be called when a tracer is about to be used.
5535 */
5536int tracing_update_buffers(void)
5537{
5538 int ret = 0;
5539
1027fcb2 5540 mutex_lock(&trace_types_lock);
1852fcce 5541 if (!ring_buffer_expanded)
2b6080f2 5542 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 5543 RING_BUFFER_ALL_CPUS);
1027fcb2 5544 mutex_unlock(&trace_types_lock);
1852fcce
SR
5545
5546 return ret;
5547}
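/*
 * For illustration only: a caller that is about to enable tracing
 * expands the buffers first and bails out on failure, the same pattern
 * tracing_snapshot_write() uses further down in this file:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 */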
5548
577b785f
SR
5549struct trace_option_dentry;
5550
37aea98b 5551static void
2b6080f2 5552create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 5553
6b450d25
SRRH
5554/*
5555 * Used to clear out the tracer before deletion of an instance.
5556 * Must have trace_types_lock held.
5557 */
5558static void tracing_set_nop(struct trace_array *tr)
5559{
5560 if (tr->current_trace == &nop_trace)
5561 return;
5562
50512ab5 5563 tr->current_trace->enabled--;
6b450d25
SRRH
5564
5565 if (tr->current_trace->reset)
5566 tr->current_trace->reset(tr);
5567
5568 tr->current_trace = &nop_trace;
5569}
5570
41d9c0be 5571static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 5572{
09d23a1d
SRRH
5573 /* Only enable if the directory has been created already. */
5574 if (!tr->dir)
5575 return;
5576
37aea98b 5577 create_trace_option_files(tr, t);
09d23a1d
SRRH
5578}
5579
5580static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5581{
bc0c38d1 5582 struct tracer *t;
12883efb 5583#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5584 bool had_max_tr;
12883efb 5585#endif
d9e54076 5586 int ret = 0;
bc0c38d1 5587
1027fcb2
SR
5588 mutex_lock(&trace_types_lock);
5589
73c5162a 5590 if (!ring_buffer_expanded) {
2b6080f2 5591 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 5592 RING_BUFFER_ALL_CPUS);
73c5162a 5593 if (ret < 0)
59f586db 5594 goto out;
73c5162a
SR
5595 ret = 0;
5596 }
5597
bc0c38d1
SR
5598 for (t = trace_types; t; t = t->next) {
5599 if (strcmp(t->name, buf) == 0)
5600 break;
5601 }
c2931e05
FW
5602 if (!t) {
5603 ret = -EINVAL;
5604 goto out;
5605 }
2b6080f2 5606 if (t == tr->current_trace)
bc0c38d1
SR
5607 goto out;
5608
a35873a0
TZ
5609#ifdef CONFIG_TRACER_SNAPSHOT
5610 if (t->use_max_tr) {
5611 arch_spin_lock(&tr->max_lock);
5612 if (tr->cond_snapshot)
5613 ret = -EBUSY;
5614 arch_spin_unlock(&tr->max_lock);
5615 if (ret)
5616 goto out;
5617 }
5618#endif
c7b3ae0b
ZSZ
 5619	/* Some tracers won't work on the kernel command line */
5620 if (system_state < SYSTEM_RUNNING && t->noboot) {
5621 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5622 t->name);
5623 goto out;
5624 }
5625
607e2ea1
SRRH
5626 /* Some tracers are only allowed for the top level buffer */
5627 if (!trace_ok_for_array(t, tr)) {
5628 ret = -EINVAL;
5629 goto out;
5630 }
5631
cf6ab6d9
SRRH
5632 /* If trace pipe files are being read, we can't change the tracer */
5633 if (tr->current_trace->ref) {
5634 ret = -EBUSY;
5635 goto out;
5636 }
5637
9f029e83 5638 trace_branch_disable();
613f04a0 5639
50512ab5 5640 tr->current_trace->enabled--;
613f04a0 5641
2b6080f2
SR
5642 if (tr->current_trace->reset)
5643 tr->current_trace->reset(tr);
34600f0e 5644
74401729 5645 /* Current trace needs to be nop_trace before synchronize_rcu */
2b6080f2 5646 tr->current_trace = &nop_trace;
34600f0e 5647
45ad21ca
SRRH
5648#ifdef CONFIG_TRACER_MAX_TRACE
5649 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
5650
5651 if (had_max_tr && !t->use_max_tr) {
5652 /*
5653 * We need to make sure that the update_max_tr sees that
5654 * current_trace changed to nop_trace to keep it from
5655 * swapping the buffers after we resize it.
 5656 * The update_max_tr is called with interrupts disabled,
 5657 * so a synchronize_rcu() is sufficient.
5658 */
74401729 5659 synchronize_rcu();
3209cff4 5660 free_snapshot(tr);
ef710e10 5661 }
12883efb 5662#endif
12883efb
SRRH
5663
5664#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5665 if (t->use_max_tr && !had_max_tr) {
2824f503 5666 ret = tracing_alloc_snapshot_instance(tr);
d60da506
HT
5667 if (ret < 0)
5668 goto out;
ef710e10 5669 }
12883efb 5670#endif
577b785f 5671
1c80025a 5672 if (t->init) {
b6f11df2 5673 ret = tracer_init(t, tr);
1c80025a
FW
5674 if (ret)
5675 goto out;
5676 }
bc0c38d1 5677
2b6080f2 5678 tr->current_trace = t;
50512ab5 5679 tr->current_trace->enabled++;
9f029e83 5680 trace_branch_enable(tr);
bc0c38d1
SR
5681 out:
5682 mutex_unlock(&trace_types_lock);
5683
d9e54076
PZ
5684 return ret;
5685}
5686
5687static ssize_t
5688tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5689 size_t cnt, loff_t *ppos)
5690{
607e2ea1 5691 struct trace_array *tr = filp->private_data;
ee6c2c1b 5692 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
5693 int i;
5694 size_t ret;
e6e7a65a
FW
5695 int err;
5696
5697 ret = cnt;
d9e54076 5698
ee6c2c1b
LZ
5699 if (cnt > MAX_TRACER_SIZE)
5700 cnt = MAX_TRACER_SIZE;
d9e54076 5701
4afe6495 5702 if (copy_from_user(buf, ubuf, cnt))
d9e54076
PZ
5703 return -EFAULT;
5704
5705 buf[cnt] = 0;
5706
5707 /* strip ending whitespace. */
5708 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5709 buf[i] = 0;
5710
607e2ea1 5711 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
5712 if (err)
5713 return err;
d9e54076 5714
cf8517cf 5715 *ppos += ret;
bc0c38d1 5716
c2931e05 5717 return ret;
bc0c38d1
SR
5718}
5719
5720static ssize_t
6508fa76
SF
5721tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5722 size_t cnt, loff_t *ppos)
bc0c38d1 5723{
bc0c38d1
SR
5724 char buf[64];
5725 int r;
5726
cffae437 5727 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 5728 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
5729 if (r > sizeof(buf))
5730 r = sizeof(buf);
4bf39a94 5731 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
5732}
5733
5734static ssize_t
6508fa76
SF
5735tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5736 size_t cnt, loff_t *ppos)
bc0c38d1 5737{
5e39841c 5738 unsigned long val;
c6caeeb1 5739 int ret;
bc0c38d1 5740
22fe9b54
PH
5741 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5742 if (ret)
c6caeeb1 5743 return ret;
bc0c38d1
SR
5744
5745 *ptr = val * 1000;
5746
5747 return cnt;
5748}
5749
6508fa76
SF
5750static ssize_t
5751tracing_thresh_read(struct file *filp, char __user *ubuf,
5752 size_t cnt, loff_t *ppos)
5753{
5754 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5755}
5756
5757static ssize_t
5758tracing_thresh_write(struct file *filp, const char __user *ubuf,
5759 size_t cnt, loff_t *ppos)
5760{
5761 struct trace_array *tr = filp->private_data;
5762 int ret;
5763
5764 mutex_lock(&trace_types_lock);
5765 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5766 if (ret < 0)
5767 goto out;
5768
5769 if (tr->current_trace->update_thresh) {
5770 ret = tr->current_trace->update_thresh(tr);
5771 if (ret < 0)
5772 goto out;
5773 }
5774
5775 ret = cnt;
5776out:
5777 mutex_unlock(&trace_types_lock);
5778
5779 return ret;
5780}
5781
f971cc9a 5782#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
e428abbb 5783
6508fa76
SF
5784static ssize_t
5785tracing_max_lat_read(struct file *filp, char __user *ubuf,
5786 size_t cnt, loff_t *ppos)
5787{
5788 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5789}
5790
5791static ssize_t
5792tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5793 size_t cnt, loff_t *ppos)
5794{
5795 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5796}
5797
e428abbb
CG
5798#endif
5799
b3806b43
SR
5800static int tracing_open_pipe(struct inode *inode, struct file *filp)
5801{
15544209 5802 struct trace_array *tr = inode->i_private;
b3806b43 5803 struct trace_iterator *iter;
b04cc6b1 5804 int ret = 0;
b3806b43
SR
5805
5806 if (tracing_disabled)
5807 return -ENODEV;
5808
7b85af63
SRRH
5809 if (trace_array_get(tr) < 0)
5810 return -ENODEV;
5811
b04cc6b1
FW
5812 mutex_lock(&trace_types_lock);
5813
b3806b43
SR
5814 /* create a buffer to store the information to pass to userspace */
5815 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
5816 if (!iter) {
5817 ret = -ENOMEM;
f77d09a3 5818 __trace_array_put(tr);
b04cc6b1
FW
5819 goto out;
5820 }
b3806b43 5821
3a161d99 5822 trace_seq_init(&iter->seq);
d716ff71 5823 iter->trace = tr->current_trace;
d7350c3f 5824
4462344e 5825 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 5826 ret = -ENOMEM;
d7350c3f 5827 goto fail;
4462344e
RR
5828 }
5829
a309720c 5830 /* trace pipe does not show start of buffer */
4462344e 5831 cpumask_setall(iter->started);
a309720c 5832
983f938a 5833 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
112f38a7
SR
5834 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5835
8be0709f 5836 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 5837 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
5838 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5839
15544209
ON
5840 iter->tr = tr;
5841 iter->trace_buffer = &tr->trace_buffer;
5842 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 5843 mutex_init(&iter->mutex);
b3806b43
SR
5844 filp->private_data = iter;
5845
107bad8b
SR
5846 if (iter->trace->pipe_open)
5847 iter->trace->pipe_open(iter);
107bad8b 5848
b444786f 5849 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
5850
5851 tr->current_trace->ref++;
b04cc6b1
FW
5852out:
5853 mutex_unlock(&trace_types_lock);
5854 return ret;
d7350c3f
FW
5855
5856fail:
d7350c3f 5857 kfree(iter);
7b85af63 5858 __trace_array_put(tr);
d7350c3f
FW
5859 mutex_unlock(&trace_types_lock);
5860 return ret;
b3806b43
SR
5861}
5862
5863static int tracing_release_pipe(struct inode *inode, struct file *file)
5864{
5865 struct trace_iterator *iter = file->private_data;
15544209 5866 struct trace_array *tr = inode->i_private;
b3806b43 5867
b04cc6b1
FW
5868 mutex_lock(&trace_types_lock);
5869
cf6ab6d9
SRRH
5870 tr->current_trace->ref--;
5871
29bf4a5e 5872 if (iter->trace->pipe_close)
c521efd1
SR
5873 iter->trace->pipe_close(iter);
5874
b04cc6b1
FW
5875 mutex_unlock(&trace_types_lock);
5876
4462344e 5877 free_cpumask_var(iter->started);
d7350c3f 5878 mutex_destroy(&iter->mutex);
b3806b43 5879 kfree(iter);
b3806b43 5880
7b85af63
SRRH
5881 trace_array_put(tr);
5882
b3806b43
SR
5883 return 0;
5884}
5885
9dd95748 5886static __poll_t
cc60cdc9 5887trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 5888{
983f938a
SRRH
5889 struct trace_array *tr = iter->tr;
5890
15693458
SRRH
5891 /* Iterators are static, they should be filled or empty */
5892 if (trace_buffer_iter(iter, iter->cpu_file))
a9a08845 5893 return EPOLLIN | EPOLLRDNORM;
2a2cc8f7 5894
983f938a 5895 if (tr->trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
5896 /*
5897 * Always select as readable when in blocking mode
5898 */
a9a08845 5899 return EPOLLIN | EPOLLRDNORM;
15693458 5900 else
12883efb 5901 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 5902 filp, poll_table);
2a2cc8f7 5903}
2a2cc8f7 5904
9dd95748 5905static __poll_t
cc60cdc9
SR
5906tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5907{
5908 struct trace_iterator *iter = filp->private_data;
5909
5910 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
5911}
5912
d716ff71 5913/* Must be called with iter->mutex held. */
ff98781b 5914static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
5915{
5916 struct trace_iterator *iter = filp->private_data;
8b8b3683 5917 int ret;
b3806b43 5918
b3806b43 5919 while (trace_empty(iter)) {
2dc8f095 5920
107bad8b 5921 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 5922 return -EAGAIN;
107bad8b 5923 }
2dc8f095 5924
b3806b43 5925 /*
250bfd3d 5926 * We block until we read something and tracing is disabled.
b3806b43
SR
5927 * We still block if tracing is disabled, but we have never
5928 * read anything. This allows a user to cat this file, and
5929 * then enable tracing. But after we have read something,
5930 * we give an EOF when tracing is again disabled.
5931 *
5932 * iter->pos will be 0 if we haven't read anything.
5933 */
75df6e68 5934 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
b3806b43 5935 break;
f4874261
SRRH
5936
5937 mutex_unlock(&iter->mutex);
5938
2c2b0a78 5939 ret = wait_on_pipe(iter, 0);
f4874261
SRRH
5940
5941 mutex_lock(&iter->mutex);
5942
8b8b3683
SRRH
5943 if (ret)
5944 return ret;
b3806b43
SR
5945 }
5946
ff98781b
EGM
5947 return 1;
5948}
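/*
 * The blocking rules above are what a consumer of trace_pipe observes
 * from userspace. A minimal sketch of such a consumer, which blocks in
 * read() while the buffer is empty (the tracefs mount point
 * /sys/kernel/tracing is an assumption, not taken from this file):
 *
 *	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		write(STDOUT_FILENO, buf, n);
 *	close(fd);
 *
 * With O_NONBLOCK the read fails with EAGAIN instead of blocking, and
 * once something has been read and tracing is then disabled the read
 * returns 0 (EOF), matching tracing_wait_pipe() above.
 */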
5949
5950/*
5951 * Consumer reader.
5952 */
5953static ssize_t
5954tracing_read_pipe(struct file *filp, char __user *ubuf,
5955 size_t cnt, loff_t *ppos)
5956{
5957 struct trace_iterator *iter = filp->private_data;
5958 ssize_t sret;
5959
d7350c3f
FW
5960 /*
5961 * Avoid more than one consumer on a single file descriptor
5962 * This is just a matter of traces coherency, the ring buffer itself
5963 * is protected.
5964 */
5965 mutex_lock(&iter->mutex);
1245800c
SRRH
5966
5967 /* return any leftover data */
5968 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5969 if (sret != -EBUSY)
5970 goto out;
5971
5972 trace_seq_init(&iter->seq);
5973
ff98781b
EGM
5974 if (iter->trace->read) {
5975 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5976 if (sret)
5977 goto out;
5978 }
5979
5980waitagain:
5981 sret = tracing_wait_pipe(filp);
5982 if (sret <= 0)
5983 goto out;
5984
b3806b43 5985 /* stop when tracing is finished */
ff98781b
EGM
5986 if (trace_empty(iter)) {
5987 sret = 0;
107bad8b 5988 goto out;
ff98781b 5989 }
b3806b43
SR
5990
5991 if (cnt >= PAGE_SIZE)
5992 cnt = PAGE_SIZE - 1;
5993
53d0aa77 5994 /* reset all but tr, trace, and overruns */
53d0aa77
SR
5995 memset(&iter->seq, 0,
5996 sizeof(struct trace_iterator) -
5997 offsetof(struct trace_iterator, seq));
ed5467da 5998 cpumask_clear(iter->started);
4823ed7e 5999 iter->pos = -1;
b3806b43 6000
4f535968 6001 trace_event_read_lock();
7e53bd42 6002 trace_access_lock(iter->cpu_file);
955b61e5 6003 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 6004 enum print_line_t ret;
5ac48378 6005 int save_len = iter->seq.seq.len;
088b1e42 6006
f9896bf3 6007 ret = print_trace_line(iter);
2c4f035f 6008 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 6009 /* don't print partial lines */
5ac48378 6010 iter->seq.seq.len = save_len;
b3806b43 6011 break;
088b1e42 6012 }
b91facc3
FW
6013 if (ret != TRACE_TYPE_NO_CONSUME)
6014 trace_consume(iter);
b3806b43 6015
5ac48378 6016 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 6017 break;
ee5e51f5
JO
6018
6019 /*
6020 * Setting the full flag means we reached the trace_seq buffer
 6021 * size and we should have left via the partial output condition above.
6022 * One of the trace_seq_* functions is not used properly.
6023 */
6024 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6025 iter->ent->type);
b3806b43 6026 }
7e53bd42 6027 trace_access_unlock(iter->cpu_file);
4f535968 6028 trace_event_read_unlock();
b3806b43 6029
b3806b43 6030 /* Now copy what we have to the user */
6c6c2796 6031 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 6032 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 6033 trace_seq_init(&iter->seq);
9ff4b974
PP
6034
6035 /*
25985edc 6036 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
6037 * entries, go back to wait for more entries.
6038 */
6c6c2796 6039 if (sret == -EBUSY)
9ff4b974 6040 goto waitagain;
b3806b43 6041
107bad8b 6042out:
d7350c3f 6043 mutex_unlock(&iter->mutex);
107bad8b 6044
6c6c2796 6045 return sret;
b3806b43
SR
6046}
6047
3c56819b
EGM
6048static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6049 unsigned int idx)
6050{
6051 __free_page(spd->pages[idx]);
6052}
6053
28dfef8f 6054static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 6055 .confirm = generic_pipe_buf_confirm,
92fdd98c 6056 .release = generic_pipe_buf_release,
34cd4998
SR
6057 .steal = generic_pipe_buf_steal,
6058 .get = generic_pipe_buf_get,
3c56819b
EGM
6059};
6060
34cd4998 6061static size_t
fa7c7f6e 6062tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
6063{
6064 size_t count;
74f06bb7 6065 int save_len;
34cd4998
SR
6066 int ret;
6067
6068 /* Seq buffer is page-sized, exactly what we need. */
6069 for (;;) {
74f06bb7 6070 save_len = iter->seq.seq.len;
34cd4998 6071 ret = print_trace_line(iter);
74f06bb7
SRRH
6072
6073 if (trace_seq_has_overflowed(&iter->seq)) {
6074 iter->seq.seq.len = save_len;
34cd4998
SR
6075 break;
6076 }
74f06bb7
SRRH
6077
6078 /*
6079 * This should not be hit, because it should only
6080 * be set if the iter->seq overflowed. But check it
6081 * anyway to be safe.
6082 */
34cd4998 6083 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
6084 iter->seq.seq.len = save_len;
6085 break;
6086 }
6087
5ac48378 6088 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
6089 if (rem < count) {
6090 rem = 0;
6091 iter->seq.seq.len = save_len;
34cd4998
SR
6092 break;
6093 }
6094
74e7ff8c
LJ
6095 if (ret != TRACE_TYPE_NO_CONSUME)
6096 trace_consume(iter);
34cd4998 6097 rem -= count;
955b61e5 6098 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
6099 rem = 0;
6100 iter->ent = NULL;
6101 break;
6102 }
6103 }
6104
6105 return rem;
6106}
6107
3c56819b
EGM
6108static ssize_t tracing_splice_read_pipe(struct file *filp,
6109 loff_t *ppos,
6110 struct pipe_inode_info *pipe,
6111 size_t len,
6112 unsigned int flags)
6113{
35f3d14d
JA
6114 struct page *pages_def[PIPE_DEF_BUFFERS];
6115 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
6116 struct trace_iterator *iter = filp->private_data;
6117 struct splice_pipe_desc spd = {
35f3d14d
JA
6118 .pages = pages_def,
6119 .partial = partial_def,
34cd4998 6120 .nr_pages = 0, /* This gets updated below. */
047fe360 6121 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
6122 .ops = &tracing_pipe_buf_ops,
6123 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
6124 };
6125 ssize_t ret;
34cd4998 6126 size_t rem;
3c56819b
EGM
6127 unsigned int i;
6128
35f3d14d
JA
6129 if (splice_grow_spd(pipe, &spd))
6130 return -ENOMEM;
6131
d7350c3f 6132 mutex_lock(&iter->mutex);
3c56819b
EGM
6133
6134 if (iter->trace->splice_read) {
6135 ret = iter->trace->splice_read(iter, filp,
6136 ppos, pipe, len, flags);
6137 if (ret)
34cd4998 6138 goto out_err;
3c56819b
EGM
6139 }
6140
6141 ret = tracing_wait_pipe(filp);
6142 if (ret <= 0)
34cd4998 6143 goto out_err;
3c56819b 6144
955b61e5 6145 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 6146 ret = -EFAULT;
34cd4998 6147 goto out_err;
3c56819b
EGM
6148 }
6149
4f535968 6150 trace_event_read_lock();
7e53bd42 6151 trace_access_lock(iter->cpu_file);
4f535968 6152
3c56819b 6153 /* Fill as many pages as possible. */
a786c06d 6154 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
6155 spd.pages[i] = alloc_page(GFP_KERNEL);
6156 if (!spd.pages[i])
34cd4998 6157 break;
3c56819b 6158
fa7c7f6e 6159 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
6160
6161 /* Copy the data into the page, so we can start over. */
6162 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 6163 page_address(spd.pages[i]),
5ac48378 6164 trace_seq_used(&iter->seq));
3c56819b 6165 if (ret < 0) {
35f3d14d 6166 __free_page(spd.pages[i]);
3c56819b
EGM
6167 break;
6168 }
35f3d14d 6169 spd.partial[i].offset = 0;
5ac48378 6170 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 6171
f9520750 6172 trace_seq_init(&iter->seq);
3c56819b
EGM
6173 }
6174
7e53bd42 6175 trace_access_unlock(iter->cpu_file);
4f535968 6176 trace_event_read_unlock();
d7350c3f 6177 mutex_unlock(&iter->mutex);
3c56819b
EGM
6178
6179 spd.nr_pages = i;
6180
a29054d9
SRRH
6181 if (i)
6182 ret = splice_to_pipe(pipe, &spd);
6183 else
6184 ret = 0;
35f3d14d 6185out:
047fe360 6186 splice_shrink_spd(&spd);
35f3d14d 6187 return ret;
3c56819b 6188
34cd4998 6189out_err:
d7350c3f 6190 mutex_unlock(&iter->mutex);
35f3d14d 6191 goto out;
3c56819b
EGM
6192}
6193
a98a3c3f
SR
6194static ssize_t
6195tracing_entries_read(struct file *filp, char __user *ubuf,
6196 size_t cnt, loff_t *ppos)
6197{
0bc392ee
ON
6198 struct inode *inode = file_inode(filp);
6199 struct trace_array *tr = inode->i_private;
6200 int cpu = tracing_get_cpu(inode);
438ced17
VN
6201 char buf[64];
6202 int r = 0;
6203 ssize_t ret;
a98a3c3f 6204
db526ca3 6205 mutex_lock(&trace_types_lock);
438ced17 6206
0bc392ee 6207 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
6208 int cpu, buf_size_same;
6209 unsigned long size;
6210
6211 size = 0;
6212 buf_size_same = 1;
6213 /* check if all cpu sizes are same */
6214 for_each_tracing_cpu(cpu) {
6215 /* fill in the size from first enabled cpu */
6216 if (size == 0)
12883efb
SRRH
6217 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
6218 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
6219 buf_size_same = 0;
6220 break;
6221 }
6222 }
6223
6224 if (buf_size_same) {
6225 if (!ring_buffer_expanded)
6226 r = sprintf(buf, "%lu (expanded: %lu)\n",
6227 size >> 10,
6228 trace_buf_size >> 10);
6229 else
6230 r = sprintf(buf, "%lu\n", size >> 10);
6231 } else
6232 r = sprintf(buf, "X\n");
6233 } else
0bc392ee 6234 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 6235
db526ca3
SR
6236 mutex_unlock(&trace_types_lock);
6237
438ced17
VN
6238 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6239 return ret;
a98a3c3f
SR
6240}
6241
6242static ssize_t
6243tracing_entries_write(struct file *filp, const char __user *ubuf,
6244 size_t cnt, loff_t *ppos)
6245{
0bc392ee
ON
6246 struct inode *inode = file_inode(filp);
6247 struct trace_array *tr = inode->i_private;
a98a3c3f 6248 unsigned long val;
4f271a2a 6249 int ret;
a98a3c3f 6250
22fe9b54
PH
6251 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6252 if (ret)
c6caeeb1 6253 return ret;
a98a3c3f
SR
6254
6255 /* must have at least 1 entry */
6256 if (!val)
6257 return -EINVAL;
6258
1696b2b0
SR
6259 /* value is in KB */
6260 val <<= 10;
0bc392ee 6261 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
6262 if (ret < 0)
6263 return ret;
a98a3c3f 6264
cf8517cf 6265 *ppos += cnt;
a98a3c3f 6266
4f271a2a
VN
6267 return cnt;
6268}
bf5e6519 6269
f81ab074
VN
6270static ssize_t
6271tracing_total_entries_read(struct file *filp, char __user *ubuf,
6272 size_t cnt, loff_t *ppos)
6273{
6274 struct trace_array *tr = filp->private_data;
6275 char buf[64];
6276 int r, cpu;
6277 unsigned long size = 0, expanded_size = 0;
6278
6279 mutex_lock(&trace_types_lock);
6280 for_each_tracing_cpu(cpu) {
12883efb 6281 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
6282 if (!ring_buffer_expanded)
6283 expanded_size += trace_buf_size >> 10;
6284 }
6285 if (ring_buffer_expanded)
6286 r = sprintf(buf, "%lu\n", size);
6287 else
6288 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6289 mutex_unlock(&trace_types_lock);
6290
6291 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6292}
6293
4f271a2a
VN
6294static ssize_t
6295tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6296 size_t cnt, loff_t *ppos)
6297{
6298 /*
 6299 * There is no need to read what the user has written; this function
 6300 * just makes sure that there is no error when "echo" is used
6301 */
6302
6303 *ppos += cnt;
a98a3c3f
SR
6304
6305 return cnt;
6306}
6307
4f271a2a
VN
6308static int
6309tracing_free_buffer_release(struct inode *inode, struct file *filp)
6310{
2b6080f2
SR
6311 struct trace_array *tr = inode->i_private;
6312
cf30cf67 6313 /* disable tracing ? */
983f938a 6314 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 6315 tracer_tracing_off(tr);
4f271a2a 6316 /* resize the ring buffer to 0 */
2b6080f2 6317 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 6318
7b85af63
SRRH
6319 trace_array_put(tr);
6320
4f271a2a
VN
6321 return 0;
6322}
6323
5bf9a1ee
PP
6324static ssize_t
6325tracing_mark_write(struct file *filp, const char __user *ubuf,
6326 size_t cnt, loff_t *fpos)
6327{
2d71619c 6328 struct trace_array *tr = filp->private_data;
d696b58c 6329 struct ring_buffer_event *event;
3dd80953 6330 enum event_trigger_type tt = ETT_NONE;
d696b58c
SR
6331 struct ring_buffer *buffer;
6332 struct print_entry *entry;
6333 unsigned long irq_flags;
d696b58c 6334 ssize_t written;
d696b58c
SR
6335 int size;
6336 int len;
fa32e855 6337
656c7f0d 6338/* Used in tracing_mark_raw_write() as well */
0f5e5a3a
RV
6339#define FAULTED_STR "<faulted>"
6340#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
5bf9a1ee 6341
c76f0694 6342 if (tracing_disabled)
5bf9a1ee
PP
6343 return -EINVAL;
6344
983f938a 6345 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5224c3a3
MSB
6346 return -EINVAL;
6347
5bf9a1ee
PP
6348 if (cnt > TRACE_BUF_SIZE)
6349 cnt = TRACE_BUF_SIZE;
6350
d696b58c 6351 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 6352
d696b58c 6353 local_save_flags(irq_flags);
656c7f0d 6354 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
d696b58c 6355
656c7f0d
SRRH
6356 /* If less than "<faulted>", then make sure we can still add that */
6357 if (cnt < FAULTED_SIZE)
6358 size += FAULTED_SIZE - cnt;
d696b58c 6359
2d71619c 6360 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
6361 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6362 irq_flags, preempt_count());
656c7f0d 6363 if (unlikely(!event))
d696b58c 6364 /* Ring buffer disabled, return as if not open for write */
656c7f0d 6365 return -EBADF;
d696b58c
SR
6366
6367 entry = ring_buffer_event_data(event);
6368 entry->ip = _THIS_IP_;
6369
656c7f0d
SRRH
6370 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6371 if (len) {
0f5e5a3a 6372 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
656c7f0d
SRRH
6373 cnt = FAULTED_SIZE;
6374 written = -EFAULT;
c13d2f7c 6375 } else
656c7f0d
SRRH
6376 written = cnt;
6377 len = cnt;
5bf9a1ee 6378
3dd80953
SRV
6379 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6380 /* do not add \n before testing triggers, but add \0 */
6381 entry->buf[cnt] = '\0';
6382 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6383 }
6384
d696b58c
SR
6385 if (entry->buf[cnt - 1] != '\n') {
6386 entry->buf[cnt] = '\n';
6387 entry->buf[cnt + 1] = '\0';
6388 } else
6389 entry->buf[cnt] = '\0';
6390
7ffbd48d 6391 __buffer_unlock_commit(buffer, event);
5bf9a1ee 6392
3dd80953
SRV
6393 if (tt)
6394 event_triggers_post_call(tr->trace_marker_file, tt);
6395
656c7f0d
SRRH
6396 if (written > 0)
6397 *fpos += written;
5bf9a1ee 6398
fa32e855
SR
6399 return written;
6400}
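/*
 * For illustration, this is the write path behind the trace_marker
 * file (tr->trace_marker_file above). A userspace sketch that injects
 * a message into the ring buffer (the tracefs mount point is an
 * assumption):
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *	write(fd, "hit the slow path\n", 18);
 *	close(fd);
 *
 * Writes longer than TRACE_BUF_SIZE are truncated, and if the copy
 * from user space faults, the string "<faulted>" is recorded instead.
 */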
6401
6402/* Limit it for now to 3K (including tag) */
6403#define RAW_DATA_MAX_SIZE (1024*3)
6404
6405static ssize_t
6406tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6407 size_t cnt, loff_t *fpos)
6408{
6409 struct trace_array *tr = filp->private_data;
6410 struct ring_buffer_event *event;
6411 struct ring_buffer *buffer;
6412 struct raw_data_entry *entry;
6413 unsigned long irq_flags;
fa32e855 6414 ssize_t written;
fa32e855
SR
6415 int size;
6416 int len;
6417
656c7f0d
SRRH
6418#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6419
fa32e855
SR
6420 if (tracing_disabled)
6421 return -EINVAL;
6422
6423 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6424 return -EINVAL;
6425
6426 /* The marker must at least have a tag id */
6427 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6428 return -EINVAL;
6429
6430 if (cnt > TRACE_BUF_SIZE)
6431 cnt = TRACE_BUF_SIZE;
6432
6433 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6434
fa32e855
SR
6435 local_save_flags(irq_flags);
6436 size = sizeof(*entry) + cnt;
656c7f0d
SRRH
6437 if (cnt < FAULT_SIZE_ID)
6438 size += FAULT_SIZE_ID - cnt;
6439
fa32e855 6440 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
6441 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6442 irq_flags, preempt_count());
656c7f0d 6443 if (!event)
fa32e855 6444 /* Ring buffer disabled, return as if not open for write */
656c7f0d 6445 return -EBADF;
fa32e855
SR
6446
6447 entry = ring_buffer_event_data(event);
6448
656c7f0d
SRRH
6449 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6450 if (len) {
6451 entry->id = -1;
0f5e5a3a 6452 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
656c7f0d 6453 written = -EFAULT;
fa32e855 6454 } else
656c7f0d 6455 written = cnt;
fa32e855
SR
6456
6457 __buffer_unlock_commit(buffer, event);
6458
656c7f0d
SRRH
6459 if (written > 0)
6460 *fpos += written;
1aa54bca
MS
6461
6462 return written;
5bf9a1ee
PP
6463}
6464
13f16d20 6465static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 6466{
2b6080f2 6467 struct trace_array *tr = m->private;
5079f326
Z
6468 int i;
6469
6470 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 6471 seq_printf(m,
5079f326 6472 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
6473 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6474 i == tr->clock_id ? "]" : "");
13f16d20 6475 seq_putc(m, '\n');
5079f326 6476
13f16d20 6477 return 0;
5079f326
Z
6478}
6479
d71bd34d 6480int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 6481{
5079f326
Z
6482 int i;
6483
5079f326
Z
6484 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6485 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6486 break;
6487 }
6488 if (i == ARRAY_SIZE(trace_clocks))
6489 return -EINVAL;
6490
5079f326
Z
6491 mutex_lock(&trace_types_lock);
6492
2b6080f2
SR
6493 tr->clock_id = i;
6494
12883efb 6495 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 6496
60303ed3
DS
6497 /*
6498 * New clock may not be consistent with the previous clock.
6499 * Reset the buffer so that it doesn't have incomparable timestamps.
6500 */
9457158b 6501 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
6502
6503#ifdef CONFIG_TRACER_MAX_TRACE
170b3b10 6504 if (tr->max_buffer.buffer)
12883efb 6505 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 6506 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 6507#endif
60303ed3 6508
5079f326
Z
6509 mutex_unlock(&trace_types_lock);
6510
e1e232ca
SR
6511 return 0;
6512}
6513
6514static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6515 size_t cnt, loff_t *fpos)
6516{
6517 struct seq_file *m = filp->private_data;
6518 struct trace_array *tr = m->private;
6519 char buf[64];
6520 const char *clockstr;
6521 int ret;
6522
6523 if (cnt >= sizeof(buf))
6524 return -EINVAL;
6525
4afe6495 6526 if (copy_from_user(buf, ubuf, cnt))
e1e232ca
SR
6527 return -EFAULT;
6528
6529 buf[cnt] = 0;
6530
6531 clockstr = strstrip(buf);
6532
6533 ret = tracing_set_clock(tr, clockstr);
6534 if (ret)
6535 return ret;
6536
5079f326
Z
6537 *fpos += cnt;
6538
6539 return cnt;
6540}
6541
13f16d20
LZ
6542static int tracing_clock_open(struct inode *inode, struct file *file)
6543{
7b85af63
SRRH
6544 struct trace_array *tr = inode->i_private;
6545 int ret;
6546
13f16d20
LZ
6547 if (tracing_disabled)
6548 return -ENODEV;
2b6080f2 6549
7b85af63
SRRH
6550 if (trace_array_get(tr))
6551 return -ENODEV;
6552
6553 ret = single_open(file, tracing_clock_show, inode->i_private);
6554 if (ret < 0)
6555 trace_array_put(tr);
6556
6557 return ret;
13f16d20
LZ
6558}
6559
2c1ea60b
TZ
6560static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6561{
6562 struct trace_array *tr = m->private;
6563
6564 mutex_lock(&trace_types_lock);
6565
6566 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6567 seq_puts(m, "delta [absolute]\n");
6568 else
6569 seq_puts(m, "[delta] absolute\n");
6570
6571 mutex_unlock(&trace_types_lock);
6572
6573 return 0;
6574}
6575
6576static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6577{
6578 struct trace_array *tr = inode->i_private;
6579 int ret;
6580
6581 if (tracing_disabled)
6582 return -ENODEV;
6583
6584 if (trace_array_get(tr))
6585 return -ENODEV;
6586
6587 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6588 if (ret < 0)
6589 trace_array_put(tr);
6590
6591 return ret;
6592}
6593
00b41452
TZ
6594int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6595{
6596 int ret = 0;
6597
6598 mutex_lock(&trace_types_lock);
6599
6600 if (abs && tr->time_stamp_abs_ref++)
6601 goto out;
6602
6603 if (!abs) {
6604 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6605 ret = -EINVAL;
6606 goto out;
6607 }
6608
6609 if (--tr->time_stamp_abs_ref)
6610 goto out;
6611 }
6612
6613 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6614
6615#ifdef CONFIG_TRACER_MAX_TRACE
6616 if (tr->max_buffer.buffer)
6617 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6618#endif
6619 out:
6620 mutex_unlock(&trace_types_lock);
6621
6622 return ret;
6623}
6624
6de58e62
SRRH
6625struct ftrace_buffer_info {
6626 struct trace_iterator iter;
6627 void *spare;
73a757e6 6628 unsigned int spare_cpu;
6de58e62
SRRH
6629 unsigned int read;
6630};
6631
debdd57f
HT
6632#ifdef CONFIG_TRACER_SNAPSHOT
6633static int tracing_snapshot_open(struct inode *inode, struct file *file)
6634{
6484c71c 6635 struct trace_array *tr = inode->i_private;
debdd57f 6636 struct trace_iterator *iter;
2b6080f2 6637 struct seq_file *m;
debdd57f
HT
6638 int ret = 0;
6639
ff451961
SRRH
6640 if (trace_array_get(tr) < 0)
6641 return -ENODEV;
6642
debdd57f 6643 if (file->f_mode & FMODE_READ) {
6484c71c 6644 iter = __tracing_open(inode, file, true);
debdd57f
HT
6645 if (IS_ERR(iter))
6646 ret = PTR_ERR(iter);
2b6080f2
SR
6647 } else {
6648 /* Writes still need the seq_file to hold the private data */
f77d09a3 6649 ret = -ENOMEM;
2b6080f2
SR
6650 m = kzalloc(sizeof(*m), GFP_KERNEL);
6651 if (!m)
f77d09a3 6652 goto out;
2b6080f2
SR
6653 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6654 if (!iter) {
6655 kfree(m);
f77d09a3 6656 goto out;
2b6080f2 6657 }
f77d09a3
AL
6658 ret = 0;
6659
ff451961 6660 iter->tr = tr;
6484c71c
ON
6661 iter->trace_buffer = &tr->max_buffer;
6662 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
6663 m->private = iter;
6664 file->private_data = m;
debdd57f 6665 }
f77d09a3 6666out:
ff451961
SRRH
6667 if (ret < 0)
6668 trace_array_put(tr);
6669
debdd57f
HT
6670 return ret;
6671}
6672
6673static ssize_t
6674tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6675 loff_t *ppos)
6676{
2b6080f2
SR
6677 struct seq_file *m = filp->private_data;
6678 struct trace_iterator *iter = m->private;
6679 struct trace_array *tr = iter->tr;
debdd57f
HT
6680 unsigned long val;
6681 int ret;
6682
6683 ret = tracing_update_buffers();
6684 if (ret < 0)
6685 return ret;
6686
6687 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6688 if (ret)
6689 return ret;
6690
6691 mutex_lock(&trace_types_lock);
6692
2b6080f2 6693 if (tr->current_trace->use_max_tr) {
debdd57f
HT
6694 ret = -EBUSY;
6695 goto out;
6696 }
6697
a35873a0
TZ
6698 arch_spin_lock(&tr->max_lock);
6699 if (tr->cond_snapshot)
6700 ret = -EBUSY;
6701 arch_spin_unlock(&tr->max_lock);
6702 if (ret)
6703 goto out;
6704
debdd57f
HT
6705 switch (val) {
6706 case 0:
f1affcaa
SRRH
6707 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6708 ret = -EINVAL;
6709 break;
debdd57f 6710 }
3209cff4
SRRH
6711 if (tr->allocated_snapshot)
6712 free_snapshot(tr);
debdd57f
HT
6713 break;
6714 case 1:
f1affcaa
SRRH
6715/* Only allow per-cpu swap if the ring buffer supports it */
6716#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6717 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6718 ret = -EINVAL;
6719 break;
6720 }
6721#endif
45ad21ca 6722 if (!tr->allocated_snapshot) {
2824f503 6723 ret = tracing_alloc_snapshot_instance(tr);
debdd57f
HT
6724 if (ret < 0)
6725 break;
debdd57f 6726 }
debdd57f
HT
6727 local_irq_disable();
6728 /* Now, we're going to swap */
f1affcaa 6729 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
a35873a0 6730 update_max_tr(tr, current, smp_processor_id(), NULL);
f1affcaa 6731 else
ce9bae55 6732 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
6733 local_irq_enable();
6734 break;
6735 default:
45ad21ca 6736 if (tr->allocated_snapshot) {
f1affcaa
SRRH
6737 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6738 tracing_reset_online_cpus(&tr->max_buffer);
6739 else
6740 tracing_reset(&tr->max_buffer, iter->cpu_file);
6741 }
debdd57f
HT
6742 break;
6743 }
6744
6745 if (ret >= 0) {
6746 *ppos += cnt;
6747 ret = cnt;
6748 }
6749out:
6750 mutex_unlock(&trace_types_lock);
6751 return ret;
6752}
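/*
 * Summarizing the switch above, the value written to the snapshot file
 * selects the action (a sketch of the user-visible behaviour; the
 * tracefs path and file names are assumptions):
 *
 *	echo 0 > snapshot	frees the allocated snapshot buffer
 *	echo 1 > snapshot	allocates it if needed and takes a snapshot
 *	echo 2 > snapshot	clears the snapshot contents without freeing
 *
 * "echo 2" stands for any value other than 0 or 1 (the default case).
 * Writing 0 to a per-cpu snapshot file is rejected, and writing 1 to
 * one is only allowed when CONFIG_RING_BUFFER_ALLOW_SWAP is enabled.
 */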
2b6080f2
SR
6753
6754static int tracing_snapshot_release(struct inode *inode, struct file *file)
6755{
6756 struct seq_file *m = file->private_data;
ff451961
SRRH
6757 int ret;
6758
6759 ret = tracing_release(inode, file);
2b6080f2
SR
6760
6761 if (file->f_mode & FMODE_READ)
ff451961 6762 return ret;
2b6080f2
SR
6763
6764 /* If write only, the seq_file is just a stub */
6765 if (m)
6766 kfree(m->private);
6767 kfree(m);
6768
6769 return 0;
6770}
6771
6de58e62
SRRH
6772static int tracing_buffers_open(struct inode *inode, struct file *filp);
6773static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6774 size_t count, loff_t *ppos);
6775static int tracing_buffers_release(struct inode *inode, struct file *file);
6776static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6777 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6778
6779static int snapshot_raw_open(struct inode *inode, struct file *filp)
6780{
6781 struct ftrace_buffer_info *info;
6782 int ret;
6783
6784 ret = tracing_buffers_open(inode, filp);
6785 if (ret < 0)
6786 return ret;
6787
6788 info = filp->private_data;
6789
6790 if (info->iter.trace->use_max_tr) {
6791 tracing_buffers_release(inode, filp);
6792 return -EBUSY;
6793 }
6794
6795 info->iter.snapshot = true;
6796 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6797
6798 return ret;
6799}
6800
debdd57f
HT
6801#endif /* CONFIG_TRACER_SNAPSHOT */
6802
6803
6508fa76
SF
6804static const struct file_operations tracing_thresh_fops = {
6805 .open = tracing_open_generic,
6806 .read = tracing_thresh_read,
6807 .write = tracing_thresh_write,
6808 .llseek = generic_file_llseek,
6809};
6810
f971cc9a 6811#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5e2336a0 6812static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
6813 .open = tracing_open_generic,
6814 .read = tracing_max_lat_read,
6815 .write = tracing_max_lat_write,
b444786f 6816 .llseek = generic_file_llseek,
bc0c38d1 6817};
e428abbb 6818#endif
bc0c38d1 6819
5e2336a0 6820static const struct file_operations set_tracer_fops = {
4bf39a94
IM
6821 .open = tracing_open_generic,
6822 .read = tracing_set_trace_read,
6823 .write = tracing_set_trace_write,
b444786f 6824 .llseek = generic_file_llseek,
bc0c38d1
SR
6825};
6826
5e2336a0 6827static const struct file_operations tracing_pipe_fops = {
4bf39a94 6828 .open = tracing_open_pipe,
2a2cc8f7 6829 .poll = tracing_poll_pipe,
4bf39a94 6830 .read = tracing_read_pipe,
3c56819b 6831 .splice_read = tracing_splice_read_pipe,
4bf39a94 6832 .release = tracing_release_pipe,
b444786f 6833 .llseek = no_llseek,
b3806b43
SR
6834};
6835
5e2336a0 6836static const struct file_operations tracing_entries_fops = {
0bc392ee 6837 .open = tracing_open_generic_tr,
a98a3c3f
SR
6838 .read = tracing_entries_read,
6839 .write = tracing_entries_write,
b444786f 6840 .llseek = generic_file_llseek,
0bc392ee 6841 .release = tracing_release_generic_tr,
a98a3c3f
SR
6842};
6843
f81ab074 6844static const struct file_operations tracing_total_entries_fops = {
7b85af63 6845 .open = tracing_open_generic_tr,
f81ab074
VN
6846 .read = tracing_total_entries_read,
6847 .llseek = generic_file_llseek,
7b85af63 6848 .release = tracing_release_generic_tr,
f81ab074
VN
6849};
6850
4f271a2a 6851static const struct file_operations tracing_free_buffer_fops = {
7b85af63 6852 .open = tracing_open_generic_tr,
4f271a2a
VN
6853 .write = tracing_free_buffer_write,
6854 .release = tracing_free_buffer_release,
6855};
6856
5e2336a0 6857static const struct file_operations tracing_mark_fops = {
7b85af63 6858 .open = tracing_open_generic_tr,
5bf9a1ee 6859 .write = tracing_mark_write,
b444786f 6860 .llseek = generic_file_llseek,
7b85af63 6861 .release = tracing_release_generic_tr,
5bf9a1ee
PP
6862};
6863
fa32e855
SR
6864static const struct file_operations tracing_mark_raw_fops = {
6865 .open = tracing_open_generic_tr,
6866 .write = tracing_mark_raw_write,
6867 .llseek = generic_file_llseek,
6868 .release = tracing_release_generic_tr,
6869};
6870
5079f326 6871static const struct file_operations trace_clock_fops = {
13f16d20
LZ
6872 .open = tracing_clock_open,
6873 .read = seq_read,
6874 .llseek = seq_lseek,
7b85af63 6875 .release = tracing_single_release_tr,
5079f326
Z
6876 .write = tracing_clock_write,
6877};
6878
2c1ea60b
TZ
6879static const struct file_operations trace_time_stamp_mode_fops = {
6880 .open = tracing_time_stamp_mode_open,
6881 .read = seq_read,
6882 .llseek = seq_lseek,
6883 .release = tracing_single_release_tr,
6884};
6885
debdd57f
HT
6886#ifdef CONFIG_TRACER_SNAPSHOT
6887static const struct file_operations snapshot_fops = {
6888 .open = tracing_snapshot_open,
6889 .read = seq_read,
6890 .write = tracing_snapshot_write,
098c879e 6891 .llseek = tracing_lseek,
2b6080f2 6892 .release = tracing_snapshot_release,
debdd57f 6893};
debdd57f 6894
6de58e62
SRRH
6895static const struct file_operations snapshot_raw_fops = {
6896 .open = snapshot_raw_open,
6897 .read = tracing_buffers_read,
6898 .release = tracing_buffers_release,
6899 .splice_read = tracing_buffers_splice_read,
6900 .llseek = no_llseek,
2cadf913
SR
6901};
6902
6de58e62
SRRH
6903#endif /* CONFIG_TRACER_SNAPSHOT */
6904
8a062902
TZ
6905#define TRACING_LOG_ERRS_MAX 8
6906#define TRACING_LOG_LOC_MAX 128
6907
6908#define CMD_PREFIX " Command: "
6909
6910struct err_info {
6911 const char **errs; /* ptr to loc-specific array of err strings */
6912 u8 type; /* index into errs -> specific err string */
6913 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
6914 u64 ts;
6915};
6916
6917struct tracing_log_err {
6918 struct list_head list;
6919 struct err_info info;
6920 char loc[TRACING_LOG_LOC_MAX]; /* err location */
6921 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
6922};
6923
8a062902
TZ
6924static DEFINE_MUTEX(tracing_err_log_lock);
6925
ff585c5b 6926static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
8a062902
TZ
6927{
6928 struct tracing_log_err *err;
6929
2f754e77 6930 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
8a062902
TZ
6931 err = kzalloc(sizeof(*err), GFP_KERNEL);
6932 if (!err)
6933 err = ERR_PTR(-ENOMEM);
2f754e77 6934 tr->n_err_log_entries++;
8a062902
TZ
6935
6936 return err;
6937 }
6938
2f754e77 6939 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
8a062902
TZ
6940 list_del(&err->list);
6941
6942 return err;
6943}
6944
6945/**
6946 * err_pos - find the position of a string within a command for error careting
6947 * @cmd: The tracing command that caused the error
6948 * @str: The string to position the caret at within @cmd
6949 *
 6950 * Finds the position of the first occurrence of @str within @cmd. The
6951 * return value can be passed to tracing_log_err() for caret placement
6952 * within @cmd.
6953 *
 6954 * Returns the index within @cmd of the first occurrence of @str or 0
6955 * if @str was not found.
6956 */
6957unsigned int err_pos(char *cmd, const char *str)
6958{
6959 char *found;
6960
6961 if (WARN_ON(!strlen(cmd)))
6962 return 0;
6963
6964 found = strstr(cmd, str);
6965 if (found)
6966 return found - cmd;
6967
6968 return 0;
6969}
6970
6971/**
6972 * tracing_log_err - write an error to the tracing error log
2f754e77 6973 * @tr: The associated trace array for the error (NULL for top level array)
8a062902
TZ
6974 * @loc: A string describing where the error occurred
6975 * @cmd: The tracing command that caused the error
6976 * @errs: The array of loc-specific static error strings
6977 * @type: The index into errs[], which produces the specific static err string
6978 * @pos: The position the caret should be placed in the cmd
6979 *
6980 * Writes an error into tracing/error_log of the form:
6981 *
6982 * <loc>: error: <text>
6983 * Command: <cmd>
6984 * ^
6985 *
6986 * tracing/error_log is a small log file containing the last
6987 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
6988 * unless there has been a tracing error, and the error log can be
6989 * cleared and have its memory freed by writing the empty string in
6990 * truncation mode to it i.e. echo > tracing/error_log.
6991 *
6992 * NOTE: the @errs array along with the @type param are used to
6993 * produce a static error string - this string is not copied and saved
6994 * when the error is logged - only a pointer to it is saved. See
6995 * existing callers for examples of how static strings are typically
6996 * defined for use with tracing_log_err().
6997 */
2f754e77
SRV
6998void tracing_log_err(struct trace_array *tr,
6999 const char *loc, const char *cmd,
8a062902
TZ
7000 const char **errs, u8 type, u8 pos)
7001{
7002 struct tracing_log_err *err;
7003
2f754e77
SRV
7004 if (!tr)
7005 tr = &global_trace;
7006
8a062902 7007 mutex_lock(&tracing_err_log_lock);
2f754e77 7008 err = get_tracing_log_err(tr);
8a062902
TZ
7009 if (PTR_ERR(err) == -ENOMEM) {
7010 mutex_unlock(&tracing_err_log_lock);
7011 return;
7012 }
7013
7014 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7015 snprintf(err->cmd, MAX_FILTER_STR_VAL,"\n" CMD_PREFIX "%s\n", cmd);
7016
7017 err->info.errs = errs;
7018 err->info.type = type;
7019 err->info.pos = pos;
7020 err->info.ts = local_clock();
7021
2f754e77 7022 list_add_tail(&err->list, &tr->err_log);
8a062902
TZ
7023 mutex_unlock(&tracing_err_log_lock);
7024}
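/*
 * A minimal sketch of a caller, following the pattern described in the
 * comment above. The names hist_errs, HIST_ERR_BAD_FIELD and the "hist"
 * location string are made up for illustration; see the real callers
 * for the actual tables:
 *
 *	static const char *hist_errs[] = {
 *		"generic error",
 *		"field not found",
 *	};
 *
 *	tracing_log_err(tr, "hist", cmd, hist_errs,
 *			HIST_ERR_BAD_FIELD, err_pos(cmd, "bad_field"));
 *
 * err_pos() supplies the caret offset into @cmd, and only a pointer to
 * the static string in hist_errs[] is stored in the log entry.
 */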
7025
2f754e77 7026static void clear_tracing_err_log(struct trace_array *tr)
8a062902
TZ
7027{
7028 struct tracing_log_err *err, *next;
7029
7030 mutex_lock(&tracing_err_log_lock);
2f754e77 7031 list_for_each_entry_safe(err, next, &tr->err_log, list) {
8a062902
TZ
7032 list_del(&err->list);
7033 kfree(err);
7034 }
7035
2f754e77 7036 tr->n_err_log_entries = 0;
8a062902
TZ
7037 mutex_unlock(&tracing_err_log_lock);
7038}
7039
7040static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7041{
2f754e77
SRV
7042 struct trace_array *tr = m->private;
7043
8a062902
TZ
7044 mutex_lock(&tracing_err_log_lock);
7045
2f754e77 7046 return seq_list_start(&tr->err_log, *pos);
8a062902
TZ
7047}
7048
7049static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7050{
2f754e77
SRV
7051 struct trace_array *tr = m->private;
7052
7053 return seq_list_next(v, &tr->err_log, pos);
8a062902
TZ
7054}
7055
7056static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7057{
7058 mutex_unlock(&tracing_err_log_lock);
7059}
7060
7061static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7062{
7063 u8 i;
7064
7065 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7066 seq_putc(m, ' ');
7067 for (i = 0; i < pos; i++)
7068 seq_putc(m, ' ');
7069 seq_puts(m, "^\n");
7070}
7071
7072static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7073{
7074 struct tracing_log_err *err = v;
7075
7076 if (err) {
7077 const char *err_text = err->info.errs[err->info.type];
7078 u64 sec = err->info.ts;
7079 u32 nsec;
7080
7081 nsec = do_div(sec, NSEC_PER_SEC);
7082 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7083 err->loc, err_text);
7084 seq_printf(m, "%s", err->cmd);
7085 tracing_err_log_show_pos(m, err->info.pos);
7086 }
7087
7088 return 0;
7089}
7090
7091static const struct seq_operations tracing_err_log_seq_ops = {
7092 .start = tracing_err_log_seq_start,
7093 .next = tracing_err_log_seq_next,
7094 .stop = tracing_err_log_seq_stop,
7095 .show = tracing_err_log_seq_show
7096};
7097
7098static int tracing_err_log_open(struct inode *inode, struct file *file)
7099{
2f754e77 7100 struct trace_array *tr = inode->i_private;
8a062902
TZ
7101 int ret = 0;
7102
2f754e77
SRV
7103 if (trace_array_get(tr) < 0)
7104 return -ENODEV;
7105
8a062902
TZ
7106 /* If this file was opened for write, then erase contents */
7107 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
2f754e77 7108 clear_tracing_err_log(tr);
8a062902 7109
2f754e77 7110 if (file->f_mode & FMODE_READ) {
8a062902 7111 ret = seq_open(file, &tracing_err_log_seq_ops);
2f754e77
SRV
7112 if (!ret) {
7113 struct seq_file *m = file->private_data;
7114 m->private = tr;
7115 } else {
7116 trace_array_put(tr);
7117 }
7118 }
8a062902
TZ
7119 return ret;
7120}
7121
7122static ssize_t tracing_err_log_write(struct file *file,
7123 const char __user *buffer,
7124 size_t count, loff_t *ppos)
7125{
7126 return count;
7127}
7128
d122ed62
TM
7129static int tracing_err_log_release(struct inode *inode, struct file *file)
7130{
7131 struct trace_array *tr = inode->i_private;
7132
7133 trace_array_put(tr);
7134
7135 if (file->f_mode & FMODE_READ)
7136 seq_release(inode, file);
7137
7138 return 0;
7139}
7140
8a062902
TZ
7141static const struct file_operations tracing_err_log_fops = {
7142 .open = tracing_err_log_open,
7143 .write = tracing_err_log_write,
7144 .read = seq_read,
7145 .llseek = seq_lseek,
d122ed62 7146 .release = tracing_err_log_release,
8a062902
TZ
7147};
7148
2cadf913
SR
7149static int tracing_buffers_open(struct inode *inode, struct file *filp)
7150{
46ef2be0 7151 struct trace_array *tr = inode->i_private;
2cadf913 7152 struct ftrace_buffer_info *info;
7b85af63 7153 int ret;
2cadf913
SR
7154
7155 if (tracing_disabled)
7156 return -ENODEV;
7157
7b85af63
SRRH
7158 if (trace_array_get(tr) < 0)
7159 return -ENODEV;
7160
2cadf913 7161 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
7162 if (!info) {
7163 trace_array_put(tr);
2cadf913 7164 return -ENOMEM;
7b85af63 7165 }
2cadf913 7166
a695cb58
SRRH
7167 mutex_lock(&trace_types_lock);
7168
cc60cdc9 7169 info->iter.tr = tr;
46ef2be0 7170 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 7171 info->iter.trace = tr->current_trace;
12883efb 7172 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 7173 info->spare = NULL;
2cadf913 7174 /* Force reading ring buffer for first read */
cc60cdc9 7175 info->read = (unsigned int)-1;
2cadf913
SR
7176
7177 filp->private_data = info;
7178
cf6ab6d9
SRRH
7179 tr->current_trace->ref++;
7180
a695cb58
SRRH
7181 mutex_unlock(&trace_types_lock);
7182
7b85af63
SRRH
7183 ret = nonseekable_open(inode, filp);
7184 if (ret < 0)
7185 trace_array_put(tr);
7186
7187 return ret;
2cadf913
SR
7188}
7189
9dd95748 7190static __poll_t
cc60cdc9
SR
7191tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7192{
7193 struct ftrace_buffer_info *info = filp->private_data;
7194 struct trace_iterator *iter = &info->iter;
7195
7196 return trace_poll(iter, filp, poll_table);
7197}
7198
2cadf913
SR
7199static ssize_t
7200tracing_buffers_read(struct file *filp, char __user *ubuf,
7201 size_t count, loff_t *ppos)
7202{
7203 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 7204 struct trace_iterator *iter = &info->iter;
a7e52ad7 7205 ssize_t ret = 0;
6de58e62 7206 ssize_t size;
2cadf913 7207
2dc5d12b
SR
7208 if (!count)
7209 return 0;
7210
6de58e62 7211#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
7212 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7213 return -EBUSY;
6de58e62
SRRH
7214#endif
7215
73a757e6 7216 if (!info->spare) {
12883efb
SRRH
7217 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
7218 iter->cpu_file);
a7e52ad7
SRV
7219 if (IS_ERR(info->spare)) {
7220 ret = PTR_ERR(info->spare);
7221 info->spare = NULL;
7222 } else {
7223 info->spare_cpu = iter->cpu_file;
7224 }
73a757e6 7225 }
ddd538f3 7226 if (!info->spare)
a7e52ad7 7227 return ret;
ddd538f3 7228
2cadf913
SR
7229 /* Do we have previous read data to read? */
7230 if (info->read < PAGE_SIZE)
7231 goto read;
7232
b627344f 7233 again:
cc60cdc9 7234 trace_access_lock(iter->cpu_file);
12883efb 7235 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
7236 &info->spare,
7237 count,
cc60cdc9
SR
7238 iter->cpu_file, 0);
7239 trace_access_unlock(iter->cpu_file);
2cadf913 7240
b627344f
SR
7241 if (ret < 0) {
7242 if (trace_empty(iter)) {
d716ff71
SRRH
7243 if ((filp->f_flags & O_NONBLOCK))
7244 return -EAGAIN;
7245
2c2b0a78 7246 ret = wait_on_pipe(iter, 0);
d716ff71
SRRH
7247 if (ret)
7248 return ret;
7249
b627344f
SR
7250 goto again;
7251 }
d716ff71 7252 return 0;
b627344f 7253 }
436fc280 7254
436fc280 7255 info->read = 0;
b627344f 7256 read:
2cadf913
SR
7257 size = PAGE_SIZE - info->read;
7258 if (size > count)
7259 size = count;
7260
7261 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
7262 if (ret == size)
7263 return -EFAULT;
7264
2dc5d12b
SR
7265 size -= ret;
7266
2cadf913
SR
7267 *ppos += size;
7268 info->read += size;
7269
7270 return size;
7271}
7272
7273static int tracing_buffers_release(struct inode *inode, struct file *file)
7274{
7275 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 7276 struct trace_iterator *iter = &info->iter;
2cadf913 7277
a695cb58
SRRH
7278 mutex_lock(&trace_types_lock);
7279
cf6ab6d9
SRRH
7280 iter->tr->current_trace->ref--;
7281
ff451961 7282 __trace_array_put(iter->tr);
2cadf913 7283
ddd538f3 7284 if (info->spare)
73a757e6
SRV
7285 ring_buffer_free_read_page(iter->trace_buffer->buffer,
7286 info->spare_cpu, info->spare);
2cadf913
SR
7287 kfree(info);
7288
a695cb58
SRRH
7289 mutex_unlock(&trace_types_lock);
7290
2cadf913
SR
7291 return 0;
7292}
7293
7294struct buffer_ref {
7295 struct ring_buffer *buffer;
7296 void *page;
73a757e6 7297 int cpu;
b9872226 7298 refcount_t refcount;
2cadf913
SR
7299};
7300
b9872226
JH
7301static void buffer_ref_release(struct buffer_ref *ref)
7302{
7303 if (!refcount_dec_and_test(&ref->refcount))
7304 return;
7305 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7306 kfree(ref);
7307}
7308
2cadf913
SR
7309static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7310 struct pipe_buffer *buf)
7311{
7312 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7313
b9872226 7314 buffer_ref_release(ref);
2cadf913
SR
7315 buf->private = 0;
7316}
7317
15fab63e 7318static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
2cadf913
SR
7319 struct pipe_buffer *buf)
7320{
7321 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7322
e9e1a2e7 7323 if (refcount_read(&ref->refcount) > INT_MAX/2)
15fab63e
MW
7324 return false;
7325
b9872226 7326 refcount_inc(&ref->refcount);
15fab63e 7327 return true;
2cadf913
SR
7328}
7329
7330/* Pipe buffer operations for a buffer. */
28dfef8f 7331static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913
SR
7332 .confirm = generic_pipe_buf_confirm,
7333 .release = buffer_pipe_buf_release,
b9872226 7334 .steal = generic_pipe_buf_nosteal,
2cadf913
SR
7335 .get = buffer_pipe_buf_get,
7336};
7337
7338/*
7339 * Callback from splice_to_pipe(), if we need to release some pages
7341 * at the end of the spd in case we errored out in filling the pipe.
7341 */
7342static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7343{
7344 struct buffer_ref *ref =
7345 (struct buffer_ref *)spd->partial[i].private;
7346
b9872226 7347 buffer_ref_release(ref);
2cadf913
SR
7348 spd->partial[i].private = 0;
7349}
7350
7351static ssize_t
7352tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7353 struct pipe_inode_info *pipe, size_t len,
7354 unsigned int flags)
7355{
7356 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 7357 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
7358 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7359 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 7360 struct splice_pipe_desc spd = {
35f3d14d
JA
7361 .pages = pages_def,
7362 .partial = partial_def,
047fe360 7363 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
7364 .ops = &buffer_pipe_buf_ops,
7365 .spd_release = buffer_spd_release,
7366 };
7367 struct buffer_ref *ref;
6b7e633f 7368 int entries, i;
07906da7 7369 ssize_t ret = 0;
2cadf913 7370
6de58e62 7371#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
7372 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7373 return -EBUSY;
6de58e62
SRRH
7374#endif
7375
d716ff71
SRRH
7376 if (*ppos & (PAGE_SIZE - 1))
7377 return -EINVAL;
93cfb3c9
LJ
7378
7379 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
7380 if (len < PAGE_SIZE)
7381 return -EINVAL;
93cfb3c9
LJ
7382 len &= PAGE_MASK;
7383 }
7384
1ae2293d
AV
7385 if (splice_grow_spd(pipe, &spd))
7386 return -ENOMEM;
7387
cc60cdc9
SR
7388 again:
7389 trace_access_lock(iter->cpu_file);
12883efb 7390 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 7391
a786c06d 7392 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
7393 struct page *page;
7394 int r;
7395
7396 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
7397 if (!ref) {
7398 ret = -ENOMEM;
2cadf913 7399 break;
07906da7 7400 }
2cadf913 7401
b9872226 7402 refcount_set(&ref->refcount, 1);
12883efb 7403 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 7404 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
a7e52ad7
SRV
7405 if (IS_ERR(ref->page)) {
7406 ret = PTR_ERR(ref->page);
7407 ref->page = NULL;
2cadf913
SR
7408 kfree(ref);
7409 break;
7410 }
73a757e6 7411 ref->cpu = iter->cpu_file;
2cadf913
SR
7412
7413 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 7414 len, iter->cpu_file, 1);
2cadf913 7415 if (r < 0) {
73a757e6
SRV
7416 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7417 ref->page);
2cadf913
SR
7418 kfree(ref);
7419 break;
7420 }
7421
2cadf913
SR
7422 page = virt_to_page(ref->page);
7423
7424 spd.pages[i] = page;
7425 spd.partial[i].len = PAGE_SIZE;
7426 spd.partial[i].offset = 0;
7427 spd.partial[i].private = (unsigned long)ref;
7428 spd.nr_pages++;
93cfb3c9 7429 *ppos += PAGE_SIZE;
93459c6c 7430
12883efb 7431 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
7432 }
7433
cc60cdc9 7434 trace_access_unlock(iter->cpu_file);
2cadf913
SR
7435 spd.nr_pages = i;
7436
7437 /* did we read anything? */
7438 if (!spd.nr_pages) {
07906da7 7439 if (ret)
1ae2293d 7440 goto out;
d716ff71 7441
1ae2293d 7442 ret = -EAGAIN;
d716ff71 7443 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
1ae2293d 7444 goto out;
07906da7 7445
03329f99 7446 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8b8b3683 7447 if (ret)
1ae2293d 7448 goto out;
e30f53aa 7449
cc60cdc9 7450 goto again;
2cadf913
SR
7451 }
7452
7453 ret = splice_to_pipe(pipe, &spd);
1ae2293d 7454out:
047fe360 7455 splice_shrink_spd(&spd);
6de58e62 7456
2cadf913
SR
7457 return ret;
7458}
7459
7460static const struct file_operations tracing_buffers_fops = {
7461 .open = tracing_buffers_open,
7462 .read = tracing_buffers_read,
cc60cdc9 7463 .poll = tracing_buffers_poll,
2cadf913
SR
7464 .release = tracing_buffers_release,
7465 .splice_read = tracing_buffers_splice_read,
7466 .llseek = no_llseek,
7467};
7468
c8d77183
SR
7469static ssize_t
7470tracing_stats_read(struct file *filp, char __user *ubuf,
7471 size_t count, loff_t *ppos)
7472{
4d3435b8
ON
7473 struct inode *inode = file_inode(filp);
7474 struct trace_array *tr = inode->i_private;
12883efb 7475 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 7476 int cpu = tracing_get_cpu(inode);
c8d77183
SR
7477 struct trace_seq *s;
7478 unsigned long cnt;
c64e148a
VN
7479 unsigned long long t;
7480 unsigned long usec_rem;
c8d77183 7481
e4f2d10f 7482 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 7483 if (!s)
a646365c 7484 return -ENOMEM;
c8d77183
SR
7485
7486 trace_seq_init(s);
7487
12883efb 7488 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
7489 trace_seq_printf(s, "entries: %ld\n", cnt);
7490
12883efb 7491 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
7492 trace_seq_printf(s, "overrun: %ld\n", cnt);
7493
12883efb 7494 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
7495 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7496
12883efb 7497 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
7498 trace_seq_printf(s, "bytes: %ld\n", cnt);
7499
58e8eedf 7500 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 7501 /* local or global for trace_clock */
12883efb 7502 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
7503 usec_rem = do_div(t, USEC_PER_SEC);
7504 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7505 t, usec_rem);
7506
12883efb 7507 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
7508 usec_rem = do_div(t, USEC_PER_SEC);
7509 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7510 } else {
7511 /* counter or tsc mode for trace_clock */
7512 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 7513 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 7514
11043d8b 7515 trace_seq_printf(s, "now ts: %llu\n",
12883efb 7516 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 7517 }
c64e148a 7518
12883efb 7519 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
7520 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7521
12883efb 7522 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
7523 trace_seq_printf(s, "read events: %ld\n", cnt);
7524
5ac48378
SRRH
7525 count = simple_read_from_buffer(ubuf, count, ppos,
7526 s->buffer, trace_seq_used(s));
c8d77183
SR
7527
7528 kfree(s);
7529
7530 return count;
7531}
7532
7533static const struct file_operations tracing_stats_fops = {
4d3435b8 7534 .open = tracing_open_generic_tr,
c8d77183 7535 .read = tracing_stats_read,
b444786f 7536 .llseek = generic_file_llseek,
4d3435b8 7537 .release = tracing_release_generic_tr,
c8d77183
SR
7538};
7539
bc0c38d1
SR
7540#ifdef CONFIG_DYNAMIC_FTRACE
7541
7542static ssize_t
b807c3d0 7543tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
7544 size_t cnt, loff_t *ppos)
7545{
7546 unsigned long *p = filp->private_data;
6a9c981b 7547 char buf[64]; /* Not too big for a shallow stack */
bc0c38d1
SR
7548 int r;
7549
6a9c981b 7550 r = scnprintf(buf, 63, "%ld", *p);
b807c3d0
SR
7551 buf[r++] = '\n';
7552
6a9c981b 7553 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
7554}
7555
5e2336a0 7556static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 7557 .open = tracing_open_generic,
b807c3d0 7558 .read = tracing_read_dyn_info,
b444786f 7559 .llseek = generic_file_llseek,
bc0c38d1 7560};
77fd5c15 7561#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 7562
77fd5c15
SRRH
7563#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7564static void
bca6c8d0 7565ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
b5f081b5 7566 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 7567 void *data)
77fd5c15 7568{
cab50379 7569 tracing_snapshot_instance(tr);
77fd5c15 7570}
bc0c38d1 7571
77fd5c15 7572static void
bca6c8d0 7573ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
b5f081b5 7574 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 7575 void *data)
bc0c38d1 7576{
6e444319 7577 struct ftrace_func_mapper *mapper = data;
1a93f8bd 7578 long *count = NULL;
77fd5c15 7579
1a93f8bd
SRV
7580 if (mapper)
7581 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7582
7583 if (count) {
7584
7585 if (*count <= 0)
7586 return;
bc0c38d1 7587
77fd5c15 7588 (*count)--;
1a93f8bd 7589 }
77fd5c15 7590
cab50379 7591 tracing_snapshot_instance(tr);
77fd5c15
SRRH
7592}
7593
7594static int
7595ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7596 struct ftrace_probe_ops *ops, void *data)
7597{
6e444319 7598 struct ftrace_func_mapper *mapper = data;
1a93f8bd 7599 long *count = NULL;
77fd5c15
SRRH
7600
7601 seq_printf(m, "%ps:", (void *)ip);
7602
fa6f0cc7 7603 seq_puts(m, "snapshot");
77fd5c15 7604
1a93f8bd
SRV
7605 if (mapper)
7606 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7607
7608 if (count)
7609 seq_printf(m, ":count=%ld\n", *count);
77fd5c15 7610 else
1a93f8bd 7611 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
7612
7613 return 0;
7614}
7615
1a93f8bd 7616static int
b5f081b5 7617ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 7618 unsigned long ip, void *init_data, void **data)
1a93f8bd 7619{
6e444319
SRV
7620 struct ftrace_func_mapper *mapper = *data;
7621
7622 if (!mapper) {
7623 mapper = allocate_ftrace_func_mapper();
7624 if (!mapper)
7625 return -ENOMEM;
7626 *data = mapper;
7627 }
1a93f8bd 7628
6e444319 7629 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
1a93f8bd
SRV
7630}
7631
7632static void
b5f081b5 7633ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 7634 unsigned long ip, void *data)
1a93f8bd 7635{
6e444319
SRV
7636 struct ftrace_func_mapper *mapper = data;
7637
7638 if (!ip) {
7639 if (!mapper)
7640 return;
7641 free_ftrace_func_mapper(mapper, NULL);
7642 return;
7643 }
1a93f8bd
SRV
7644
7645 ftrace_func_mapper_remove_ip(mapper, ip);
7646}
7647
77fd5c15
SRRH
7648static struct ftrace_probe_ops snapshot_probe_ops = {
7649 .func = ftrace_snapshot,
7650 .print = ftrace_snapshot_print,
7651};
7652
7653static struct ftrace_probe_ops snapshot_count_probe_ops = {
7654 .func = ftrace_count_snapshot,
7655 .print = ftrace_snapshot_print,
1a93f8bd
SRV
7656 .init = ftrace_snapshot_init,
7657 .free = ftrace_snapshot_free,
77fd5c15
SRRH
7658};
7659
7660static int
04ec7bb6 7661ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
77fd5c15
SRRH
7662 char *glob, char *cmd, char *param, int enable)
7663{
7664 struct ftrace_probe_ops *ops;
7665 void *count = (void *)-1;
7666 char *number;
7667 int ret;
7668
0f179765
SRV
7669 if (!tr)
7670 return -ENODEV;
7671
77fd5c15
SRRH
7672 /* hash funcs only work with set_ftrace_filter */
7673 if (!enable)
7674 return -EINVAL;
7675
7676 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7677
d3d532d7 7678 if (glob[0] == '!')
7b60f3d8 7679 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
77fd5c15
SRRH
7680
7681 if (!param)
7682 goto out_reg;
7683
7684 number = strsep(&param, ":");
7685
7686 if (!strlen(number))
7687 goto out_reg;
7688
7689 /*
7690 * We use the callback data field (which is a pointer)
7691 * as our counter.
7692 */
7693 ret = kstrtoul(number, 0, (unsigned long *)&count);
7694 if (ret)
7695 return ret;
7696
7697 out_reg:
2824f503 7698 ret = tracing_alloc_snapshot_instance(tr);
df62db5b
SRV
7699 if (ret < 0)
7700 goto out;
77fd5c15 7701
4c174688 7702 ret = register_ftrace_function_probe(glob, tr, ops, count);
77fd5c15 7703
df62db5b 7704 out:
77fd5c15
SRRH
7705 return ret < 0 ? ret : 0;
7706}
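
/*
 * Shell-level example (a sketch of the documented function-trigger
 * syntax this callback parses; the function names are arbitrary):
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter
 *	echo 'do_page_fault:snapshot:3' > set_ftrace_filter
 *
 * arm an unlimited and a three-shot snapshot trigger respectively,
 * while the '!' prefix handled above removes one again:
 *
 *	echo '!schedule:snapshot' > set_ftrace_filter
 */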
7707
7708static struct ftrace_func_command ftrace_snapshot_cmd = {
7709 .name = "snapshot",
7710 .func = ftrace_trace_snapshot_callback,
7711};
7712
38de93ab 7713static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
7714{
7715 return register_ftrace_command(&ftrace_snapshot_cmd);
7716}
7717#else
38de93ab 7718static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 7719#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 7720
7eeafbca 7721static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 7722{
8434dc93
SRRH
7723 if (WARN_ON(!tr->dir))
7724 return ERR_PTR(-ENODEV);
7725
7726 /* Top directory uses NULL as the parent */
7727 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7728 return NULL;
7729
7730 /* All sub buffers have a descriptor */
2b6080f2 7731 return tr->dir;
bc0c38d1
SR
7732}
7733
2b6080f2 7734static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 7735{
b04cc6b1
FW
7736 struct dentry *d_tracer;
7737
2b6080f2
SR
7738 if (tr->percpu_dir)
7739 return tr->percpu_dir;
b04cc6b1 7740
7eeafbca 7741 d_tracer = tracing_get_dentry(tr);
14a5ae40 7742 if (IS_ERR(d_tracer))
b04cc6b1
FW
7743 return NULL;
7744
8434dc93 7745 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 7746
2b6080f2 7747 WARN_ONCE(!tr->percpu_dir,
8434dc93 7748 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 7749
2b6080f2 7750 return tr->percpu_dir;
b04cc6b1
FW
7751}
7752
649e9c70
ON
7753static struct dentry *
7754trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7755 void *data, long cpu, const struct file_operations *fops)
7756{
7757 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7758
7759 if (ret) /* See tracing_get_cpu() */
7682c918 7760 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
7761 return ret;
7762}
7763
2b6080f2 7764static void
8434dc93 7765tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 7766{
2b6080f2 7767 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 7768 struct dentry *d_cpu;
dd49a38c 7769 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 7770
0a3d7ce7
NK
7771 if (!d_percpu)
7772 return;
7773
dd49a38c 7774 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 7775 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 7776 if (!d_cpu) {
a395d6a7 7777 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
7778 return;
7779 }
b04cc6b1 7780
8656e7a2 7781 /* per cpu trace_pipe */
649e9c70 7782 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 7783 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
7784
7785 /* per cpu trace */
649e9c70 7786 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 7787 tr, cpu, &tracing_fops);
7f96f93f 7788
649e9c70 7789 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 7790 tr, cpu, &tracing_buffers_fops);
7f96f93f 7791
649e9c70 7792 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 7793 tr, cpu, &tracing_stats_fops);
438ced17 7794
649e9c70 7795 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 7796 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
7797
7798#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 7799 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 7800 tr, cpu, &snapshot_fops);
6de58e62 7801
649e9c70 7802 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 7803 tr, cpu, &snapshot_raw_fops);
f1affcaa 7804#endif
b04cc6b1
FW
7805}
7806
60a11774
SR
7807#ifdef CONFIG_FTRACE_SELFTEST
7808/* Let selftest have access to static functions in this file */
7809#include "trace_selftest.c"
7810#endif
7811
577b785f
SR
7812static ssize_t
7813trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7814 loff_t *ppos)
7815{
7816 struct trace_option_dentry *topt = filp->private_data;
7817 char *buf;
7818
7819 if (topt->flags->val & topt->opt->bit)
7820 buf = "1\n";
7821 else
7822 buf = "0\n";
7823
7824 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7825}
7826
7827static ssize_t
7828trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7829 loff_t *ppos)
7830{
7831 struct trace_option_dentry *topt = filp->private_data;
7832 unsigned long val;
577b785f
SR
7833 int ret;
7834
22fe9b54
PH
7835 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7836 if (ret)
577b785f
SR
7837 return ret;
7838
8d18eaaf
LZ
7839 if (val != 0 && val != 1)
7840 return -EINVAL;
577b785f 7841
8d18eaaf 7842 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 7843 mutex_lock(&trace_types_lock);
8c1a49ae 7844 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 7845 topt->opt, !val);
577b785f
SR
7846 mutex_unlock(&trace_types_lock);
7847 if (ret)
7848 return ret;
577b785f
SR
7849 }
7850
7851 *ppos += cnt;
7852
7853 return cnt;
7854}
7855
7856
7857static const struct file_operations trace_options_fops = {
7858 .open = tracing_open_generic,
7859 .read = trace_options_read,
7860 .write = trace_options_write,
b444786f 7861 .llseek = generic_file_llseek,
577b785f
SR
7862};
7863
9a38a885
SRRH
7864/*
7865 * In order to pass in both the trace_array descriptor as well as the index
7866 * to the flag that the trace option file represents, the trace_array
7867 * has a character array of trace_flags_index[], which holds the index
7868 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7869 * The address of this character array is passed to the flag option file
7870 * read/write callbacks.
7871 *
7872 * In order to extract both the index and the trace_array descriptor,
7873 * get_tr_index() uses the following algorithm.
7874 *
7875 * idx = *ptr;
7876 *
7877 * As the pointer itself contains the address of the index (remember
7878 * index[1] == 1).
7879 *
7880 * Then to get the trace_array descriptor, by subtracting that index
7881 * from the ptr, we get to the start of the index itself.
7882 *
7883 * ptr - idx == &index[0]
7884 *
7885 * Then a simple container_of() from that pointer gets us to the
7886 * trace_array descriptor.
7887 */
7888static void get_tr_index(void *data, struct trace_array **ptr,
7889 unsigned int *pindex)
7890{
7891 *pindex = *(unsigned char *)data;
7892
7893 *ptr = container_of(data - *pindex, struct trace_array,
7894 trace_flags_index);
7895}
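
/*
 * Worked example of the scheme above (values illustrative): if the
 * option file's private_data points at tr->trace_flags_index[3],
 * which was initialized to 3, then get_tr_index() computes
 *
 *	index = *(unsigned char *)data;   which is 3
 *	data - index                      which is &tr->trace_flags_index[0]
 *
 * and container_of() on that address yields the enclosing
 * trace_array, so one pointer encodes both the flag bit and its tr.
 */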
7896
a8259075
SR
7897static ssize_t
7898trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7899 loff_t *ppos)
7900{
9a38a885
SRRH
7901 void *tr_index = filp->private_data;
7902 struct trace_array *tr;
7903 unsigned int index;
a8259075
SR
7904 char *buf;
7905
9a38a885
SRRH
7906 get_tr_index(tr_index, &tr, &index);
7907
7908 if (tr->trace_flags & (1 << index))
a8259075
SR
7909 buf = "1\n";
7910 else
7911 buf = "0\n";
7912
7913 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7914}
7915
7916static ssize_t
7917trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7918 loff_t *ppos)
7919{
9a38a885
SRRH
7920 void *tr_index = filp->private_data;
7921 struct trace_array *tr;
7922 unsigned int index;
a8259075
SR
7923 unsigned long val;
7924 int ret;
7925
9a38a885
SRRH
7926 get_tr_index(tr_index, &tr, &index);
7927
22fe9b54
PH
7928 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7929 if (ret)
a8259075
SR
7930 return ret;
7931
f2d84b65 7932 if (val != 0 && val != 1)
a8259075 7933 return -EINVAL;
69d34da2
SRRH
7934
7935 mutex_lock(&trace_types_lock);
2b6080f2 7936 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 7937 mutex_unlock(&trace_types_lock);
a8259075 7938
613f04a0
SRRH
7939 if (ret < 0)
7940 return ret;
7941
a8259075
SR
7942 *ppos += cnt;
7943
7944 return cnt;
7945}
7946
a8259075
SR
7947static const struct file_operations trace_options_core_fops = {
7948 .open = tracing_open_generic,
7949 .read = trace_options_core_read,
7950 .write = trace_options_core_write,
b444786f 7951 .llseek = generic_file_llseek,
a8259075
SR
7952};
7953
5452af66 7954struct dentry *trace_create_file(const char *name,
f4ae40a6 7955 umode_t mode,
5452af66
FW
7956 struct dentry *parent,
7957 void *data,
7958 const struct file_operations *fops)
7959{
7960 struct dentry *ret;
7961
8434dc93 7962 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 7963 if (!ret)
a395d6a7 7964 pr_warn("Could not create tracefs '%s' entry\n", name);
5452af66
FW
7965
7966 return ret;
7967}
7968
7969
2b6080f2 7970static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
7971{
7972 struct dentry *d_tracer;
a8259075 7973
2b6080f2
SR
7974 if (tr->options)
7975 return tr->options;
a8259075 7976
7eeafbca 7977 d_tracer = tracing_get_dentry(tr);
14a5ae40 7978 if (IS_ERR(d_tracer))
a8259075
SR
7979 return NULL;
7980
8434dc93 7981 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 7982 if (!tr->options) {
a395d6a7 7983 pr_warn("Could not create tracefs directory 'options'\n");
a8259075
SR
7984 return NULL;
7985 }
7986
2b6080f2 7987 return tr->options;
a8259075
SR
7988}
7989
577b785f 7990static void
2b6080f2
SR
7991create_trace_option_file(struct trace_array *tr,
7992 struct trace_option_dentry *topt,
577b785f
SR
7993 struct tracer_flags *flags,
7994 struct tracer_opt *opt)
7995{
7996 struct dentry *t_options;
577b785f 7997
2b6080f2 7998 t_options = trace_options_init_dentry(tr);
577b785f
SR
7999 if (!t_options)
8000 return;
8001
8002 topt->flags = flags;
8003 topt->opt = opt;
2b6080f2 8004 topt->tr = tr;
577b785f 8005
5452af66 8006 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
8007 &trace_options_fops);
8008
577b785f
SR
8009}
8010
37aea98b 8011static void
2b6080f2 8012create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
8013{
8014 struct trace_option_dentry *topts;
37aea98b 8015 struct trace_options *tr_topts;
577b785f
SR
8016 struct tracer_flags *flags;
8017 struct tracer_opt *opts;
8018 int cnt;
37aea98b 8019 int i;
577b785f
SR
8020
8021 if (!tracer)
37aea98b 8022 return;
577b785f
SR
8023
8024 flags = tracer->flags;
8025
8026 if (!flags || !flags->opts)
37aea98b
SRRH
8027 return;
8028
8029 /*
8030 * If this is an instance, only create flags for tracers
8031 * the instance may have.
8032 */
8033 if (!trace_ok_for_array(tracer, tr))
8034 return;
8035
8036 for (i = 0; i < tr->nr_topts; i++) {
d39cdd20
CH
8037 /* Make sure there are no duplicate flags. */
8038 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
37aea98b
SRRH
8039 return;
8040 }
577b785f
SR
8041
8042 opts = flags->opts;
8043
8044 for (cnt = 0; opts[cnt].name; cnt++)
8045 ;
8046
0cfe8245 8047 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f 8048 if (!topts)
37aea98b
SRRH
8049 return;
8050
8051 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8052 GFP_KERNEL);
8053 if (!tr_topts) {
8054 kfree(topts);
8055 return;
8056 }
8057
8058 tr->topts = tr_topts;
8059 tr->topts[tr->nr_topts].tracer = tracer;
8060 tr->topts[tr->nr_topts].topts = topts;
8061 tr->nr_topts++;
577b785f 8062
41d9c0be 8063 for (cnt = 0; opts[cnt].name; cnt++) {
2b6080f2 8064 create_trace_option_file(tr, &topts[cnt], flags,
577b785f 8065 &opts[cnt]);
41d9c0be
SRRH
8066 WARN_ONCE(topts[cnt].entry == NULL,
8067 "Failed to create trace option: %s",
8068 opts[cnt].name);
8069 }
577b785f
SR
8070}
8071
a8259075 8072static struct dentry *
2b6080f2
SR
8073create_trace_option_core_file(struct trace_array *tr,
8074 const char *option, long index)
a8259075
SR
8075{
8076 struct dentry *t_options;
a8259075 8077
2b6080f2 8078 t_options = trace_options_init_dentry(tr);
a8259075
SR
8079 if (!t_options)
8080 return NULL;
8081
9a38a885
SRRH
8082 return trace_create_file(option, 0644, t_options,
8083 (void *)&tr->trace_flags_index[index],
8084 &trace_options_core_fops);
a8259075
SR
8085}
8086
16270145 8087static void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
8088{
8089 struct dentry *t_options;
16270145 8090 bool top_level = tr == &global_trace;
a8259075
SR
8091 int i;
8092
2b6080f2 8093 t_options = trace_options_init_dentry(tr);
a8259075
SR
8094 if (!t_options)
8095 return;
8096
16270145
SRRH
8097 for (i = 0; trace_options[i]; i++) {
8098 if (top_level ||
8099 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8100 create_trace_option_core_file(tr, trace_options[i], i);
8101 }
a8259075
SR
8102}
8103
499e5470
SR
8104static ssize_t
8105rb_simple_read(struct file *filp, char __user *ubuf,
8106 size_t cnt, loff_t *ppos)
8107{
348f0fc2 8108 struct trace_array *tr = filp->private_data;
499e5470
SR
8109 char buf[64];
8110 int r;
8111
10246fa3 8112 r = tracer_tracing_is_on(tr);
499e5470
SR
8113 r = sprintf(buf, "%d\n", r);
8114
8115 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8116}
8117
8118static ssize_t
8119rb_simple_write(struct file *filp, const char __user *ubuf,
8120 size_t cnt, loff_t *ppos)
8121{
348f0fc2 8122 struct trace_array *tr = filp->private_data;
12883efb 8123 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
8124 unsigned long val;
8125 int ret;
8126
8127 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8128 if (ret)
8129 return ret;
8130
8131 if (buffer) {
2df8f8a6 8132 mutex_lock(&trace_types_lock);
f143641b
SRV
8133 if (!!val == tracer_tracing_is_on(tr)) {
8134 val = 0; /* do nothing */
8135 } else if (val) {
10246fa3 8136 tracer_tracing_on(tr);
2b6080f2
SR
8137 if (tr->current_trace->start)
8138 tr->current_trace->start(tr);
2df8f8a6 8139 } else {
10246fa3 8140 tracer_tracing_off(tr);
2b6080f2
SR
8141 if (tr->current_trace->stop)
8142 tr->current_trace->stop(tr);
2df8f8a6
SR
8143 }
8144 mutex_unlock(&trace_types_lock);
499e5470
SR
8145 }
8146
8147 (*ppos)++;
8148
8149 return cnt;
8150}
8151
8152static const struct file_operations rb_simple_fops = {
7b85af63 8153 .open = tracing_open_generic_tr,
499e5470
SR
8154 .read = rb_simple_read,
8155 .write = rb_simple_write,
7b85af63 8156 .release = tracing_release_generic_tr,
499e5470
SR
8157 .llseek = default_llseek,
8158};
8159
03329f99
SRV
8160static ssize_t
8161buffer_percent_read(struct file *filp, char __user *ubuf,
8162 size_t cnt, loff_t *ppos)
8163{
8164 struct trace_array *tr = filp->private_data;
8165 char buf[64];
8166 int r;
8167
8168 r = tr->buffer_percent;
8169 r = sprintf(buf, "%d\n", r);
8170
8171 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8172}
8173
8174static ssize_t
8175buffer_percent_write(struct file *filp, const char __user *ubuf,
8176 size_t cnt, loff_t *ppos)
8177{
8178 struct trace_array *tr = filp->private_data;
8179 unsigned long val;
8180 int ret;
8181
8182 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8183 if (ret)
8184 return ret;
8185
8186 if (val > 100)
8187 return -EINVAL;
8188
8189 if (!val)
8190 val = 1;
8191
8192 tr->buffer_percent = val;
8193
8194 (*ppos)++;
8195
8196 return cnt;
8197}
8198
8199static const struct file_operations buffer_percent_fops = {
8200 .open = tracing_open_generic_tr,
8201 .read = buffer_percent_read,
8202 .write = buffer_percent_write,
8203 .release = tracing_release_generic_tr,
8204 .llseek = default_llseek,
8205};
8206
ff585c5b 8207static struct dentry *trace_instance_dir;
277ba044
SR
8208
8209static void
8434dc93 8210init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 8211
55034cd6
SRRH
8212static int
8213allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
8214{
8215 enum ring_buffer_flags rb_flags;
737223fb 8216
983f938a 8217 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
737223fb 8218
dced341b
SRRH
8219 buf->tr = tr;
8220
55034cd6
SRRH
8221 buf->buffer = ring_buffer_alloc(size, rb_flags);
8222 if (!buf->buffer)
8223 return -ENOMEM;
737223fb 8224
55034cd6
SRRH
8225 buf->data = alloc_percpu(struct trace_array_cpu);
8226 if (!buf->data) {
8227 ring_buffer_free(buf->buffer);
4397f045 8228 buf->buffer = NULL;
55034cd6
SRRH
8229 return -ENOMEM;
8230 }
737223fb 8231
737223fb
SRRH
8232 /* Allocate the first page for all buffers */
8233 set_buffer_entries(&tr->trace_buffer,
8234 ring_buffer_size(tr->trace_buffer.buffer, 0));
8235
55034cd6
SRRH
8236 return 0;
8237}
737223fb 8238
55034cd6
SRRH
8239static int allocate_trace_buffers(struct trace_array *tr, int size)
8240{
8241 int ret;
737223fb 8242
55034cd6
SRRH
8243 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
8244 if (ret)
8245 return ret;
737223fb 8246
55034cd6
SRRH
8247#ifdef CONFIG_TRACER_MAX_TRACE
8248 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8249 allocate_snapshot ? size : 1);
8250 if (WARN_ON(ret)) {
737223fb 8251 ring_buffer_free(tr->trace_buffer.buffer);
24f2aaf9 8252 tr->trace_buffer.buffer = NULL;
55034cd6 8253 free_percpu(tr->trace_buffer.data);
24f2aaf9 8254 tr->trace_buffer.data = NULL;
55034cd6
SRRH
8255 return -ENOMEM;
8256 }
8257 tr->allocated_snapshot = allocate_snapshot;
737223fb 8258
55034cd6
SRRH
8259 /*
8260 * Only the top level trace array gets its snapshot allocated
8261 * from the kernel command line.
8262 */
8263 allocate_snapshot = false;
737223fb 8264#endif
55034cd6 8265 return 0;
737223fb
SRRH
8266}
8267
f0b70cc4
SRRH
8268static void free_trace_buffer(struct trace_buffer *buf)
8269{
8270 if (buf->buffer) {
8271 ring_buffer_free(buf->buffer);
8272 buf->buffer = NULL;
8273 free_percpu(buf->data);
8274 buf->data = NULL;
8275 }
8276}
8277
23aaa3c1
SRRH
8278static void free_trace_buffers(struct trace_array *tr)
8279{
8280 if (!tr)
8281 return;
8282
f0b70cc4 8283 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
8284
8285#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 8286 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
8287#endif
8288}
8289
9a38a885
SRRH
8290static void init_trace_flags_index(struct trace_array *tr)
8291{
8292 int i;
8293
8294 /* Used by the trace options files */
8295 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8296 tr->trace_flags_index[i] = i;
8297}
8298
37aea98b
SRRH
8299static void __update_tracer_options(struct trace_array *tr)
8300{
8301 struct tracer *t;
8302
8303 for (t = trace_types; t; t = t->next)
8304 add_tracer_options(tr, t);
8305}
8306
8307static void update_tracer_options(struct trace_array *tr)
8308{
8309 mutex_lock(&trace_types_lock);
8310 __update_tracer_options(tr);
8311 mutex_unlock(&trace_types_lock);
8312}
8313
f45d1225 8314struct trace_array *trace_array_create(const char *name)
737223fb 8315{
277ba044
SR
8316 struct trace_array *tr;
8317 int ret;
277ba044 8318
12ecef0c 8319 mutex_lock(&event_mutex);
277ba044
SR
8320 mutex_lock(&trace_types_lock);
8321
8322 ret = -EEXIST;
8323 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8324 if (tr->name && strcmp(tr->name, name) == 0)
8325 goto out_unlock;
8326 }
8327
8328 ret = -ENOMEM;
8329 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8330 if (!tr)
8331 goto out_unlock;
8332
8333 tr->name = kstrdup(name, GFP_KERNEL);
8334 if (!tr->name)
8335 goto out_free_tr;
8336
ccfe9e42
AL
8337 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8338 goto out_free_tr;
8339
20550622 8340 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
983f938a 8341
ccfe9e42
AL
8342 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8343
277ba044
SR
8344 raw_spin_lock_init(&tr->start_lock);
8345
0b9b12c1
SRRH
8346 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8347
277ba044
SR
8348 tr->current_trace = &nop_trace;
8349
8350 INIT_LIST_HEAD(&tr->systems);
8351 INIT_LIST_HEAD(&tr->events);
067fe038 8352 INIT_LIST_HEAD(&tr->hist_vars);
2f754e77 8353 INIT_LIST_HEAD(&tr->err_log);
277ba044 8354
737223fb 8355 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
8356 goto out_free_tr;
8357
8434dc93 8358 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
8359 if (!tr->dir)
8360 goto out_free_tr;
8361
8362 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 8363 if (ret) {
8434dc93 8364 tracefs_remove_recursive(tr->dir);
277ba044 8365 goto out_free_tr;
609e85a7 8366 }
277ba044 8367
04ec7bb6
SRV
8368 ftrace_init_trace_array(tr);
8369
8434dc93 8370 init_tracer_tracefs(tr, tr->dir);
9a38a885 8371 init_trace_flags_index(tr);
37aea98b 8372 __update_tracer_options(tr);
277ba044
SR
8373
8374 list_add(&tr->list, &ftrace_trace_arrays);
8375
8376 mutex_unlock(&trace_types_lock);
12ecef0c 8377 mutex_unlock(&event_mutex);
277ba044 8378
f45d1225 8379 return tr;
277ba044
SR
8380
8381 out_free_tr:
23aaa3c1 8382 free_trace_buffers(tr);
ccfe9e42 8383 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
8384 kfree(tr->name);
8385 kfree(tr);
8386
8387 out_unlock:
8388 mutex_unlock(&trace_types_lock);
12ecef0c 8389 mutex_unlock(&event_mutex);
277ba044 8390
f45d1225
DI
8391 return ERR_PTR(ret);
8392}
8393EXPORT_SYMBOL_GPL(trace_array_create);
277ba044 8394
f45d1225
DI
8395static int instance_mkdir(const char *name)
8396{
8397 return PTR_ERR_OR_ZERO(trace_array_create(name));
277ba044
SR
8398}
8399
f45d1225 8400static int __remove_instance(struct trace_array *tr)
0c8916c3 8401{
37aea98b 8402 int i;
0c8916c3 8403
cf6ab6d9 8404 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
f45d1225 8405 return -EBUSY;
a695cb58 8406
0c8916c3
SR
8407 list_del(&tr->list);
8408
20550622
SRRH
8409 /* Disable all the flags that were enabled coming in */
8410 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8411 if ((1 << i) & ZEROED_TRACE_FLAGS)
8412 set_tracer_flag(tr, 1 << i, 0);
8413 }
8414
6b450d25 8415 tracing_set_nop(tr);
a0e6369e 8416 clear_ftrace_function_probes(tr);
0c8916c3 8417 event_trace_del_tracer(tr);
d879d0b8 8418 ftrace_clear_pids(tr);
591dffda 8419 ftrace_destroy_function_files(tr);
681a4a2f 8420 tracefs_remove_recursive(tr->dir);
a9fcaaac 8421 free_trace_buffers(tr);
0c8916c3 8422
37aea98b
SRRH
8423 for (i = 0; i < tr->nr_topts; i++) {
8424 kfree(tr->topts[i].topts);
8425 }
8426 kfree(tr->topts);
8427
db9108e0 8428 free_cpumask_var(tr->tracing_cpumask);
0c8916c3
SR
8429 kfree(tr->name);
8430 kfree(tr);
f45d1225 8431 tr = NULL;
0c8916c3 8432
f45d1225
DI
8433 return 0;
8434}
8435
8436int trace_array_destroy(struct trace_array *tr)
8437{
8438 int ret;
8439
8440 if (!tr)
8441 return -EINVAL;
8442
8443 mutex_lock(&event_mutex);
8444 mutex_lock(&trace_types_lock);
8445
8446 ret = __remove_instance(tr);
8447
8448 mutex_unlock(&trace_types_lock);
8449 mutex_unlock(&event_mutex);
8450
8451 return ret;
8452}
8453EXPORT_SYMBOL_GPL(trace_array_destroy);
8454
8455static int instance_rmdir(const char *name)
8456{
8457 struct trace_array *tr;
8458 int ret;
8459
8460 mutex_lock(&event_mutex);
8461 mutex_lock(&trace_types_lock);
8462
8463 ret = -ENODEV;
8464 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8465 if (tr->name && strcmp(tr->name, name) == 0) {
8466 ret = __remove_instance(tr);
8467 break;
8468 }
8469 }
0c8916c3 8470
0c8916c3 8471 mutex_unlock(&trace_types_lock);
12ecef0c 8472 mutex_unlock(&event_mutex);
0c8916c3
SR
8473
8474 return ret;
8475}
8476
277ba044
SR
8477static __init void create_trace_instances(struct dentry *d_tracer)
8478{
eae47358
SRRH
8479 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8480 instance_mkdir,
8481 instance_rmdir);
277ba044
SR
8482 if (WARN_ON(!trace_instance_dir))
8483 return;
277ba044
SR
8484}
8485
2b6080f2 8486static void
8434dc93 8487init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 8488{
3dd80953 8489 struct trace_event_file *file;
121aaee7 8490 int cpu;
2b6080f2 8491
607e2ea1
SRRH
8492 trace_create_file("available_tracers", 0444, d_tracer,
8493 tr, &show_traces_fops);
8494
8495 trace_create_file("current_tracer", 0644, d_tracer,
8496 tr, &set_tracer_fops);
8497
ccfe9e42
AL
8498 trace_create_file("tracing_cpumask", 0644, d_tracer,
8499 tr, &tracing_cpumask_fops);
8500
2b6080f2
SR
8501 trace_create_file("trace_options", 0644, d_tracer,
8502 tr, &tracing_iter_fops);
8503
8504 trace_create_file("trace", 0644, d_tracer,
6484c71c 8505 tr, &tracing_fops);
2b6080f2
SR
8506
8507 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 8508 tr, &tracing_pipe_fops);
2b6080f2
SR
8509
8510 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 8511 tr, &tracing_entries_fops);
2b6080f2
SR
8512
8513 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8514 tr, &tracing_total_entries_fops);
8515
238ae93d 8516 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
8517 tr, &tracing_free_buffer_fops);
8518
8519 trace_create_file("trace_marker", 0220, d_tracer,
8520 tr, &tracing_mark_fops);
8521
3dd80953
SRV
8522 file = __find_event_file(tr, "ftrace", "print");
8523 if (file && file->dir)
8524 trace_create_file("trigger", 0644, file->dir, file,
8525 &event_trigger_fops);
8526 tr->trace_marker_file = file;
8527
fa32e855
SR
8528 trace_create_file("trace_marker_raw", 0220, d_tracer,
8529 tr, &tracing_mark_raw_fops);
8530
2b6080f2
SR
8531 trace_create_file("trace_clock", 0644, d_tracer, tr,
8532 &trace_clock_fops);
8533
8534 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 8535 tr, &rb_simple_fops);
ce9bae55 8536
2c1ea60b
TZ
8537 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8538 &trace_time_stamp_mode_fops);
8539
a7b1d74e 8540 tr->buffer_percent = 50;
03329f99
SRV
8541
8542 trace_create_file("buffer_percent", 0444, d_tracer,
8543 tr, &buffer_percent_fops);
8544
16270145
SRRH
8545 create_trace_options_dir(tr);
8546
f971cc9a 8547#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6d9b3fa5
SRRH
8548 trace_create_file("tracing_max_latency", 0644, d_tracer,
8549 &tr->max_latency, &tracing_max_lat_fops);
8550#endif
8551
591dffda
SRRH
8552 if (ftrace_create_function_files(tr, d_tracer))
8553 WARN(1, "Could not allocate function filter files");
8554
ce9bae55
SRRH
8555#ifdef CONFIG_TRACER_SNAPSHOT
8556 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 8557 tr, &snapshot_fops);
ce9bae55 8558#endif
121aaee7 8559
8a062902
TZ
8560 trace_create_file("error_log", 0644, d_tracer,
8561 tr, &tracing_err_log_fops);
8562
121aaee7 8563 for_each_tracing_cpu(cpu)
8434dc93 8564 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 8565
345ddcc8 8566 ftrace_init_tracefs(tr, d_tracer);
2b6080f2
SR
8567}
8568
93faccbb 8569static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
f76180bc
SRRH
8570{
8571 struct vfsmount *mnt;
8572 struct file_system_type *type;
8573
8574 /*
8575 * To maintain backward compatibility for tools that mount
8576 * debugfs to get to the tracing facility, tracefs is automatically
8577 * mounted to the debugfs/tracing directory.
8578 */
8579 type = get_fs_type("tracefs");
8580 if (!type)
8581 return NULL;
93faccbb 8582 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
f76180bc
SRRH
8583 put_filesystem(type);
8584 if (IS_ERR(mnt))
8585 return NULL;
8586 mntget(mnt);
8587
8588 return mnt;
8589}
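
/*
 * Net effect (a sketch of the expected behaviour, not new code): with
 * debugfs mounted at /sys/kernel/debug, a tool that simply does
 *
 *	cd /sys/kernel/debug/tracing
 *
 * triggers this automount callback, so the legacy path keeps working
 * even though the files live in the separate tracefs filesystem
 * (normally also available at /sys/kernel/tracing).
 */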
8590
7eeafbca
SRRH
8591/**
8592 * tracing_init_dentry - initialize top level trace array
8593 *
8594 * This is called when creating files or directories in the tracing
8595 * directory. It is called via fs_initcall() by any of the boot up code
8596 * and expects to return the dentry of the top level tracing directory.
8597 */
8598struct dentry *tracing_init_dentry(void)
8599{
8600 struct trace_array *tr = &global_trace;
8601
f76180bc 8602 /* The top level trace array uses NULL as parent */
7eeafbca 8603 if (tr->dir)
f76180bc 8604 return NULL;
7eeafbca 8605
8b129199
JW
8606 if (WARN_ON(!tracefs_initialized()) ||
8607 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8608 WARN_ON(!debugfs_initialized())))
7eeafbca
SRRH
8609 return ERR_PTR(-ENODEV);
8610
f76180bc
SRRH
8611 /*
8612 * As there may still be users that expect the tracing
8613 * files to exist in debugfs/tracing, we must automount
8614 * the tracefs file system there, so older tools still
8615 * work with the newer kernel.
8616 */
8617 tr->dir = debugfs_create_automount("tracing", NULL,
8618 trace_automount, NULL);
7eeafbca
SRRH
8619 if (!tr->dir) {
8620 pr_warn_once("Could not create debugfs directory 'tracing'\n");
8621 return ERR_PTR(-ENOMEM);
8622 }
8623
8434dc93 8624 return NULL;
7eeafbca
SRRH
8625}
8626
00f4b652
JL
8627extern struct trace_eval_map *__start_ftrace_eval_maps[];
8628extern struct trace_eval_map *__stop_ftrace_eval_maps[];
0c564a53 8629
5f60b351 8630static void __init trace_eval_init(void)
0c564a53 8631{
3673b8e4
SRRH
8632 int len;
8633
02fd7f68 8634 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
f57a4143 8635 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
3673b8e4
SRRH
8636}
8637
8638#ifdef CONFIG_MODULES
f57a4143 8639static void trace_module_add_evals(struct module *mod)
3673b8e4 8640{
99be647c 8641 if (!mod->num_trace_evals)
3673b8e4
SRRH
8642 return;
8643
8644 /*
8645 * Modules with bad taint do not have events created; do
8646 * not bother with enums either.
8647 */
8648 if (trace_module_has_bad_taint(mod))
8649 return;
8650
f57a4143 8651 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
3673b8e4
SRRH
8652}
8653
681bec03 8654#ifdef CONFIG_TRACE_EVAL_MAP_FILE
f57a4143 8655static void trace_module_remove_evals(struct module *mod)
9828413d 8656{
23bf8cb8
JL
8657 union trace_eval_map_item *map;
8658 union trace_eval_map_item **last = &trace_eval_maps;
9828413d 8659
99be647c 8660 if (!mod->num_trace_evals)
9828413d
SRRH
8661 return;
8662
1793ed93 8663 mutex_lock(&trace_eval_mutex);
9828413d 8664
23bf8cb8 8665 map = trace_eval_maps;
9828413d
SRRH
8666
8667 while (map) {
8668 if (map->head.mod == mod)
8669 break;
5f60b351 8670 map = trace_eval_jmp_to_tail(map);
9828413d
SRRH
8671 last = &map->tail.next;
8672 map = map->tail.next;
8673 }
8674 if (!map)
8675 goto out;
8676
5f60b351 8677 *last = trace_eval_jmp_to_tail(map)->tail.next;
9828413d
SRRH
8678 kfree(map);
8679 out:
1793ed93 8680 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
8681}
8682#else
f57a4143 8683static inline void trace_module_remove_evals(struct module *mod) { }
681bec03 8684#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 8685
3673b8e4
SRRH
8686static int trace_module_notify(struct notifier_block *self,
8687 unsigned long val, void *data)
8688{
8689 struct module *mod = data;
8690
8691 switch (val) {
8692 case MODULE_STATE_COMING:
f57a4143 8693 trace_module_add_evals(mod);
3673b8e4 8694 break;
9828413d 8695 case MODULE_STATE_GOING:
f57a4143 8696 trace_module_remove_evals(mod);
9828413d 8697 break;
3673b8e4
SRRH
8698 }
8699
8700 return 0;
0c564a53
SRRH
8701}
8702
3673b8e4
SRRH
8703static struct notifier_block trace_module_nb = {
8704 .notifier_call = trace_module_notify,
8705 .priority = 0,
8706};
9828413d 8707#endif /* CONFIG_MODULES */
3673b8e4 8708
8434dc93 8709static __init int tracer_init_tracefs(void)
bc0c38d1
SR
8710{
8711 struct dentry *d_tracer;
bc0c38d1 8712
7e53bd42
LJ
8713 trace_access_lock_init();
8714
bc0c38d1 8715 d_tracer = tracing_init_dentry();
14a5ae40 8716 if (IS_ERR(d_tracer))
ed6f1c99 8717 return 0;
bc0c38d1 8718
58b92547
SRV
8719 event_trace_init();
8720
8434dc93 8721 init_tracer_tracefs(&global_trace, d_tracer);
501c2375 8722 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
bc0c38d1 8723
5452af66 8724 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 8725 &global_trace, &tracing_thresh_fops);
a8259075 8726
339ae5d3 8727 trace_create_file("README", 0444, d_tracer,
5452af66
FW
8728 NULL, &tracing_readme_fops);
8729
69abe6a5
AP
8730 trace_create_file("saved_cmdlines", 0444, d_tracer,
8731 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 8732
939c7a4f
YY
8733 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8734 NULL, &tracing_saved_cmdlines_size_fops);
8735
99c621d7
MS
8736 trace_create_file("saved_tgids", 0444, d_tracer,
8737 NULL, &tracing_saved_tgids_fops);
8738
5f60b351 8739 trace_eval_init();
0c564a53 8740
f57a4143 8741 trace_create_eval_file(d_tracer);
9828413d 8742
3673b8e4
SRRH
8743#ifdef CONFIG_MODULES
8744 register_module_notifier(&trace_module_nb);
8745#endif
8746
bc0c38d1 8747#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
8748 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8749 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 8750#endif
b04cc6b1 8751
277ba044 8752 create_trace_instances(d_tracer);
5452af66 8753
37aea98b 8754 update_tracer_options(&global_trace);
09d23a1d 8755
b5ad384e 8756 return 0;
bc0c38d1
SR
8757}
8758
3f5a54e3
SR
8759static int trace_panic_handler(struct notifier_block *this,
8760 unsigned long event, void *unused)
8761{
944ac425 8762 if (ftrace_dump_on_oops)
cecbca96 8763 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
8764 return NOTIFY_OK;
8765}
8766
8767static struct notifier_block trace_panic_notifier = {
8768 .notifier_call = trace_panic_handler,
8769 .next = NULL,
8770 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8771};
8772
8773static int trace_die_handler(struct notifier_block *self,
8774 unsigned long val,
8775 void *data)
8776{
8777 switch (val) {
8778 case DIE_OOPS:
944ac425 8779 if (ftrace_dump_on_oops)
cecbca96 8780 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
8781 break;
8782 default:
8783 break;
8784 }
8785 return NOTIFY_OK;
8786}
8787
8788static struct notifier_block trace_die_notifier = {
8789 .notifier_call = trace_die_handler,
8790 .priority = 200
8791};
8792
8793/*
8794 * printk is set to a max of 1024; we really don't need it that big.
8795 * Nothing should be printing 1000 characters anyway.
8796 */
8797#define TRACE_MAX_PRINT 1000
8798
8799/*
8800 * Define here KERN_TRACE so that we have one place to modify
8801 * it if we decide to change what log level the ftrace dump
8802 * should be at.
8803 */
428aee14 8804#define KERN_TRACE KERN_EMERG
3f5a54e3 8805
955b61e5 8806void
3f5a54e3
SR
8807trace_printk_seq(struct trace_seq *s)
8808{
8809 /* Probably should print a warning here. */
3a161d99
SRRH
8810 if (s->seq.len >= TRACE_MAX_PRINT)
8811 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 8812
820b75f6
SRRH
8813 /*
8814 * More paranoid code. Although the buffer size is set to
8815 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8816 * an extra layer of protection.
8817 */
8818 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8819 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
8820
8821 /* should be zero ended, but we are paranoid. */
3a161d99 8822 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
8823
8824 printk(KERN_TRACE "%s", s->buffer);
8825
f9520750 8826 trace_seq_init(s);
3f5a54e3
SR
8827}
8828
955b61e5
JW
8829void trace_init_global_iter(struct trace_iterator *iter)
8830{
8831 iter->tr = &global_trace;
2b6080f2 8832 iter->trace = iter->tr->current_trace;
ae3b5093 8833 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 8834 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
8835
8836 if (iter->trace && iter->trace->open)
8837 iter->trace->open(iter);
8838
8839 /* Annotate start of buffers if we had overruns */
8840 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8841 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8842
8843 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8844 if (trace_clocks[iter->tr->clock_id].in_ns)
8845 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
8846}
8847
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);
	printk_nmi_direct_enter();

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read everything that we can,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	printk_nmi_direct_exit();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
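
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * because ftrace_dump() is exported, other kernel code can spill the trace
 * ring buffer to the console when it hits a fatal condition, e.g.:
 *
 *	if (WARN_ON(dev_state_broken))
 *		ftrace_dump(DUMP_ORIG);
 *
 * Here "dev_state_broken" is a made-up condition.  DUMP_ORIG dumps only the
 * current CPU's buffer, DUMP_ALL dumps every CPU's buffer, and DUMP_NONE
 * turns tracing off without printing anything.
 */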

int trace_run_command(const char *buf, int (*createfn)(int, char **))
{
	char **argv;
	int argc, ret;

	argc = 0;
	ret = 0;
	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = createfn(argc, argv);

	argv_free(argv);

	return ret;
}
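
/*
 * Illustrative sketch (hypothetical callback, not part of the original
 * file): trace_run_command() splits the command string into an argv[]
 * array and hands it to the callback, so the callback only has to look at
 * individual words:
 *
 *	static int create_widget_cmd(int argc, char **argv)
 *	{
 *		if (argc < 2)
 *			return -EINVAL;
 *		pr_info("cmd %s with %d arguments\n", argv[0], argc - 1);
 *		return 0;
 *	}
 *
 *	int ret = trace_run_command("widget size=64 name=foo", create_widget_cmd);
 */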

#define WRITE_BUFSIZE 4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(int, char **))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = trace_run_command(buf, createfn);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
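
/*
 * Illustrative sketch (hypothetical file_operations handler, not part of
 * the original file): a dynamic event interface can wire its tracefs
 * write() handler straight into trace_parse_run_command(), which chunks
 * the user buffer, splits it on newlines, strips '#' comments and invokes
 * the callback once per command line:
 *
 *	static ssize_t widget_events_write(struct file *file,
 *					   const char __user *buffer,
 *					   size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, buffer, count, ppos,
 *					       create_widget_cmd);
 *	}
 */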

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocate some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
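
/*
 * Illustrative example (the command-line values are made up): several of
 * the globals consumed above are set from the kernel command line before
 * tracer_alloc_buffers() runs, e.g.:
 *
 *	ftrace=function trace_buf_size=10240k trace_clock=global tp_printk
 *
 * "ftrace=" selects default_bootup_tracer, "trace_buf_size=" sets
 * trace_buf_size, "trace_clock=" sets trace_boot_clock, and "tp_printk"
 * enables tracepoint_printk (checked by early_trace_init() below).
 */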

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default boot-up tracer name lives in an init section.
	 * This function runs as a late initcall. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static int tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       " \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}

	return 0;
}
late_initcall_sync(tracing_set_default_clock);
#endif