// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions
 * into the ring buffer, such as trace_printk(), could occur at
 * the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements:
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed.  If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

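/*
 * Illustrative sketch (not part of this file): the three helpers above
 * are designed to slot directly into a seq_file's operations. A
 * hypothetical tracefs file exposing a trace_pid_list could wire them
 * up roughly like this, with only thin wrappers needed to resolve the
 * pid_list pointer (here a made-up "example_pid_list"):
 *
 *	static void *example_pids_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(example_pid_list, pos);
 *	}
 *
 *	static void *example_pids_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(example_pid_list, v, pos);
 *	}
 *
 *	static void example_pids_stop(struct seq_file *m, void *v)
 *	{
 *	}
 *
 *	static const struct seq_operations example_pids_seq_ops = {
 *		.start	= example_pids_start,
 *		.next	= example_pids_next,
 *		.show	= trace_pid_show,
 *		.stop	= example_pids_stop,
 *	};
 *
 * Note how the "pid + 1" convention flows through unchanged: start/next
 * hand out pid+1 cookies and trace_pid_show() subtracts the 1 back off.
 */
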
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list)
		return -ENOMEM;

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

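/*
 * Illustrative sketch (not part of this file): a tracefs write handler
 * built on trace_pid_write() typically swaps the new list in under the
 * subsystem's own locking and frees the old one after a grace period.
 * Roughly (all names here are hypothetical):
 *
 *	static ssize_t
 *	example_pids_write(struct file *filp, const char __user *ubuf,
 *			   size_t cnt, loff_t *ppos)
 *	{
 *		struct trace_pid_list *filtered_pids, *pid_list;
 *		ssize_t ret;
 *
 *		mutex_lock(&example_mutex);
 *		filtered_pids = rcu_dereference_protected(example_pid_list,
 *					lockdep_is_held(&example_mutex));
 *		ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
 *		if (ret >= 0) {
 *			rcu_assign_pointer(example_pid_list, pid_list);
 *			if (filtered_pids) {
 *				synchronize_sched();
 *				trace_free_pid_list(filtered_pids);
 *			}
 *		}
 *		mutex_unlock(&example_mutex);
 *		return ret;
 *	}
 *
 * This mirrors how the event and function pid filters in this directory
 * consume the helper, but the exact locking is the caller's business.
 */
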
static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

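/*
 * Illustrative sketch (not part of this file): a consuming reader
 * brackets its ring-buffer accesses with the primitives above, so a
 * per-cpu reader and a RING_BUFFER_ALL_CPUS reader can never consume
 * the same events concurrently:
 *
 *	trace_access_lock(cpu_file);
 *	event = ring_buffer_consume(buffer, cpu_file, &ts, NULL);
 *	// ... inspect the event data while it is still valid ...
 *	trace_access_unlock(cpu_file);
 *
 * The rwsem makes all per-cpu readers (down_read plus a per-cpu mutex)
 * mutually exclusive with any whole-buffer reader (down_write), while
 * readers of different cpus only share the rwsem in read mode and
 * therefore proceed in parallel.
 */
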
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, flags, pc);
	ent->type = type;
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

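/*
 * Illustrative sketch (not part of this file): tracing_on()/tracing_off()
 * are exported so kernel code can bracket a region of interest and stop
 * the buffers from wrapping past it, e.g. when chasing a rare condition:
 *
 *	if (suspicious_condition) {
 *		trace_printk("entered bad state: %d\n", state);
 *		tracing_off();	// freeze the buffer for post-mortem reading
 *	}
 *
 * The trace is then read from tracefs (trace, or per_cpu/.../trace) at
 * leisure, and tracing_on() (or echo 1 > tracing_on) re-arms recording.
 */
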

static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

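/*
 * Illustrative note (not part of this file): callers do not normally
 * invoke __trace_puts() directly. The trace_puts() macro (defined
 * alongside trace_printk(), historically in include/linux/kernel.h)
 * resolves a compile-time constant string to either this function or
 * __trace_bputs() below, e.g.:
 *
 *	trace_puts("reached the slow path\n");
 *
 * which records _THIS_IP_ as @ip so the trace shows where the message
 * came from, at far lower cost than a full trace_printk() format.
 */
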
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
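
/*
 * Illustrative sketch (not part of this file): a typical pattern is to
 * allocate the snapshot buffer once from sleepable context, then take
 * cheap snapshots from wherever the interesting condition fires:
 *
 *	// module/driver init, may sleep
 *	tracing_alloc_snapshot();
 *
 *	// later, in a hot path (but not NMI context):
 *	if (latency > worst_seen) {
 *		worst_seen = latency;
 *		tracing_snapshot();	// swap the live buffer aside
 *	}
 *
 * The frozen data is then read from the tracefs "snapshot" file while
 * the live buffer keeps tracing.
 */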
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

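/*
 * Illustrative note (not part of this file): because set_buf_size() uses
 * memparse(), the boot parameter accepts the usual size suffixes, e.g.:
 *
 *	trace_buf_size=1441792
 *	trace_buf_size=4M
 *
 * The value is the per-cpu buffer size in bytes, rounded up to page
 * size when the ring buffer is actually allocated.
 */
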
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};

bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}

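/*
 * Illustrative note (not part of this file): the "name" column above is
 * what user space selects through the tracefs trace_clock file, e.g.:
 *
 *	# cat /sys/kernel/debug/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	# echo mono > /sys/kernel/debug/tracing/trace_clock
 *
 * and the in_ns flag feeds trace_clock_in_ns(), which tells the output
 * code whether timestamps can be rendered as sec.usec or must be shown
 * as raw counts.
 */
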
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

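/*
 * Illustrative sketch (not part of this file): trace_get_user() is the
 * standard tokenizer for tracefs control files that accept a list of
 * words. trace_pid_write() above is one real consumer; the skeleton is:
 *
 *	struct trace_parser parser;
 *	loff_t pos = 0;
 *
 *	if (trace_parser_get_init(&parser, MAX_TOKEN + 1))
 *		return -ENOMEM;
 *	while (cnt > 0) {
 *		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *		if (ret < 0 || !trace_parser_loaded(&parser))
 *			break;
 *		ubuf += ret;
 *		cnt -= ret;
 *		// parser.buffer now holds one NUL-terminated token
 *		trace_parser_clear(&parser);
 *	}
 *	trace_parser_put(&parser);
 *
 * MAX_TOKEN here is a hypothetical bound on a single token's length.
 */
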
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from trace_buffer */
	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

	swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, int full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head		list;
	struct tracer			*type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}

static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
	 * initialized and such), then do not run its selftests yet.
	 * Instead, run it a little later in the boot process.
	 */
	if (!selftests_can_run)
		return save_selftest(type);

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}

static __init int init_trace_selftests(void)
{
	struct trace_selftests *p, *n;
	struct tracer *t, **last;
	int ret;

	selftests_can_run = true;

	mutex_lock(&trace_types_lock);

	if (list_empty(&postponed_selftests))
		goto out;

	pr_info("Running postponed tracer tests:\n");

	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		ret = run_tracer_selftest(p->type);
		/* If the test fails, then warn and remove from available_tracers */
		if (ret < 0) {
			WARN(1, "tracer: %s failed selftest, disabling\n",
			     p->type->name);
			last = &trace_types;
			for (t = trace_types; t; t = t->next) {
				if (t == p->type) {
					*last = t->next;
					break;
				}
				last = &t->next;
			}
		}
		list_del(&p->list);
		kfree(p);
	}

 out:
	mutex_unlock(&trace_types_lock);

	return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

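/*
 * Illustrative sketch (not part of this file): a minimal boot-time tracer
 * plugin only needs a name plus init/reset callbacks before handing
 * itself to register_tracer(). Hypothetical example:
 *
 *	static int example_trace_init(struct trace_array *tr)
 *	{
 *		// start hooking events, timers, etc.
 *		return 0;
 *	}
 *
 *	static void example_trace_reset(struct trace_array *tr)
 *	{
 *		// undo whatever init set up
 *	}
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_trace_init,
 *		.reset	= example_trace_reset,
 *	};
 *
 *	static __init int init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(init_example_tracer);
 *
 * After this, "example" shows up in available_tracers and can be
 * selected by writing it to current_tracer.
 */
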
12883efb 1674void tracing_reset(struct trace_buffer *buf, int cpu)
f633903a 1675{
12883efb 1676 struct ring_buffer *buffer = buf->buffer;
f633903a 1677
a5416411
HT
1678 if (!buffer)
1679 return;
1680
f633903a
SR
1681 ring_buffer_record_disable(buffer);
1682
1683 /* Make sure all commits have finished */
1684 synchronize_sched();
68179686 1685 ring_buffer_reset_cpu(buffer, cpu);
f633903a
SR
1686
1687 ring_buffer_record_enable(buffer);
1688}
1689
12883efb 1690void tracing_reset_online_cpus(struct trace_buffer *buf)
213cc060 1691{
12883efb 1692 struct ring_buffer *buffer = buf->buffer;
213cc060
PE
1693 int cpu;
1694
a5416411
HT
1695 if (!buffer)
1696 return;
1697
621968cd
SR
1698 ring_buffer_record_disable(buffer);
1699
1700 /* Make sure all commits have finished */
1701 synchronize_sched();
1702
9457158b 1703 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
213cc060
PE
1704
1705 for_each_online_cpu(cpu)
68179686 1706 ring_buffer_reset_cpu(buffer, cpu);
621968cd
SR
1707
1708 ring_buffer_record_enable(buffer);
213cc060
PE
1709}
1710
09d8091c 1711/* Must have trace_types_lock held */
873c642f 1712void tracing_reset_all_online_cpus(void)
9456f0fa 1713{
873c642f
SRRH
1714 struct trace_array *tr;
1715
873c642f 1716 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
065e63f9
SRV
1717 if (!tr->clear_trace)
1718 continue;
1719 tr->clear_trace = false;
12883efb
SRRH
1720 tracing_reset_online_cpus(&tr->trace_buffer);
1721#ifdef CONFIG_TRACER_MAX_TRACE
1722 tracing_reset_online_cpus(&tr->max_buffer);
1723#endif
873c642f 1724 }
9456f0fa
SR
1725}
1726
d914ba37
JF
1727static int *tgid_map;
1728
939c7a4f 1729#define SAVED_CMDLINES_DEFAULT 128
2c7eea4c 1730#define NO_CMDLINE_MAP UINT_MAX
edc35bd7 1731static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
939c7a4f
YY
1732struct saved_cmdlines_buffer {
1733 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1734 unsigned *map_cmdline_to_pid;
1735 unsigned cmdline_num;
1736 int cmdline_idx;
1737 char *saved_cmdlines;
1738};
1739static struct saved_cmdlines_buffer *savedcmd;
25b0b44a 1740
25b0b44a 1741/* temporary disable recording */
d914ba37 1742static atomic_t trace_record_taskinfo_disabled __read_mostly;
bc0c38d1 1743
939c7a4f
YY
1744static inline char *get_saved_cmdlines(int idx)
1745{
1746 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1747}
1748
1749static inline void set_cmdline(int idx, const char *cmdline)
bc0c38d1 1750{
939c7a4f
YY
1751 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1752}
1753
1754static int allocate_cmdlines_buffer(unsigned int val,
1755 struct saved_cmdlines_buffer *s)
1756{
6da2ec56
KC
1757 s->map_cmdline_to_pid = kmalloc_array(val,
1758 sizeof(*s->map_cmdline_to_pid),
1759 GFP_KERNEL);
939c7a4f
YY
1760 if (!s->map_cmdline_to_pid)
1761 return -ENOMEM;
1762
6da2ec56 1763 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
939c7a4f
YY
1764 if (!s->saved_cmdlines) {
1765 kfree(s->map_cmdline_to_pid);
1766 return -ENOMEM;
1767 }
1768
1769 s->cmdline_idx = 0;
1770 s->cmdline_num = val;
1771 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1772 sizeof(s->map_pid_to_cmdline));
1773 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1774 val * sizeof(*s->map_cmdline_to_pid));
1775
1776 return 0;
1777}
1778
1779static int trace_create_savedcmd(void)
1780{
1781 int ret;
1782
a6af8fbf 1783 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
939c7a4f
YY
1784 if (!savedcmd)
1785 return -ENOMEM;
1786
1787 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1788 if (ret < 0) {
1789 kfree(savedcmd);
1790 savedcmd = NULL;
1791 return -ENOMEM;
1792 }
1793
1794 return 0;
bc0c38d1
SR
1795}
1796
b5130b1e
CE
1797int is_tracing_stopped(void)
1798{
2b6080f2 1799 return global_trace.stop_count;
b5130b1e
CE
1800}
1801
0f048701
SR
1802/**
1803 * tracing_start - quick start of the tracer
1804 *
1805 * If tracing is enabled but was stopped by tracing_stop,
1806 * this will start the tracer back up.
1807 */
1808void tracing_start(void)
1809{
1810 struct ring_buffer *buffer;
1811 unsigned long flags;
1812
1813 if (tracing_disabled)
1814 return;
1815
2b6080f2
SR
1816 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1817 if (--global_trace.stop_count) {
1818 if (global_trace.stop_count < 0) {
b06a8301
SR
1819 /* Someone screwed up their debugging */
1820 WARN_ON_ONCE(1);
2b6080f2 1821 global_trace.stop_count = 0;
b06a8301 1822 }
0f048701
SR
1823 goto out;
1824 }
1825
a2f80714 1826 /* Prevent the buffers from switching */
0b9b12c1 1827 arch_spin_lock(&global_trace.max_lock);
0f048701 1828
12883efb 1829 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1830 if (buffer)
1831 ring_buffer_record_enable(buffer);
1832
12883efb
SRRH
1833#ifdef CONFIG_TRACER_MAX_TRACE
1834 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1835 if (buffer)
1836 ring_buffer_record_enable(buffer);
12883efb 1837#endif
0f048701 1838
0b9b12c1 1839 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1840
0f048701 1841 out:
2b6080f2
SR
1842 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1843}
1844
1845static void tracing_start_tr(struct trace_array *tr)
1846{
1847 struct ring_buffer *buffer;
1848 unsigned long flags;
1849
1850 if (tracing_disabled)
1851 return;
1852
1853 /* If global, we need to also start the max tracer */
1854 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1855 return tracing_start();
1856
1857 raw_spin_lock_irqsave(&tr->start_lock, flags);
1858
1859 if (--tr->stop_count) {
1860 if (tr->stop_count < 0) {
1861 /* Someone screwed up their debugging */
1862 WARN_ON_ONCE(1);
1863 tr->stop_count = 0;
1864 }
1865 goto out;
1866 }
1867
12883efb 1868 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1869 if (buffer)
1870 ring_buffer_record_enable(buffer);
1871
1872 out:
1873 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1874}
1875
1876/**
1877 * tracing_stop - quick stop of the tracer
1878 *
 1879 * Lightweight way to stop tracing. Use in conjunction with
1880 * tracing_start.
1881 */
1882void tracing_stop(void)
1883{
1884 struct ring_buffer *buffer;
1885 unsigned long flags;
1886
2b6080f2
SR
1887 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1888 if (global_trace.stop_count++)
0f048701
SR
1889 goto out;
1890
a2f80714 1891 /* Prevent the buffers from switching */
0b9b12c1 1892 arch_spin_lock(&global_trace.max_lock);
a2f80714 1893
12883efb 1894 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1895 if (buffer)
1896 ring_buffer_record_disable(buffer);
1897
12883efb
SRRH
1898#ifdef CONFIG_TRACER_MAX_TRACE
1899 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1900 if (buffer)
1901 ring_buffer_record_disable(buffer);
12883efb 1902#endif
0f048701 1903
0b9b12c1 1904 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1905
0f048701 1906 out:
2b6080f2
SR
1907 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1908}
1909
1910static void tracing_stop_tr(struct trace_array *tr)
1911{
1912 struct ring_buffer *buffer;
1913 unsigned long flags;
1914
1915 /* If global, we need to also stop the max tracer */
1916 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1917 return tracing_stop();
1918
1919 raw_spin_lock_irqsave(&tr->start_lock, flags);
1920 if (tr->stop_count++)
1921 goto out;
1922
12883efb 1923 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1924 if (buffer)
1925 ring_buffer_record_disable(buffer);
1926
1927 out:
1928 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1929}
1930
379cfdac 1931static int trace_save_cmdline(struct task_struct *tsk)
bc0c38d1 1932{
a635cf04 1933 unsigned pid, idx;
bc0c38d1 1934
eaf260ac
JF
1935 /* treat recording of idle task as a success */
1936 if (!tsk->pid)
1937 return 1;
1938
1939 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
379cfdac 1940 return 0;
bc0c38d1
SR
1941
1942 /*
1943 * It's not the end of the world if we don't get
1944 * the lock, but we also don't want to spin
1945 * nor do we want to disable interrupts,
1946 * so if we miss here, then better luck next time.
1947 */
0199c4e6 1948 if (!arch_spin_trylock(&trace_cmdline_lock))
379cfdac 1949 return 0;
bc0c38d1 1950
939c7a4f 1951 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2c7eea4c 1952 if (idx == NO_CMDLINE_MAP) {
939c7a4f 1953 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
bc0c38d1 1954
a635cf04
CE
1955 /*
1956 * Check whether the cmdline buffer at idx has a pid
1957 * mapped. We are going to overwrite that entry so we
1958 * need to clear the map_pid_to_cmdline. Otherwise we
1959 * would read the new comm for the old pid.
1960 */
939c7a4f 1961 pid = savedcmd->map_cmdline_to_pid[idx];
a635cf04 1962 if (pid != NO_CMDLINE_MAP)
939c7a4f 1963 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
bc0c38d1 1964
939c7a4f
YY
1965 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1966 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
bc0c38d1 1967
939c7a4f 1968 savedcmd->cmdline_idx = idx;
bc0c38d1
SR
1969 }
1970
939c7a4f 1971 set_cmdline(idx, tsk->comm);
bc0c38d1 1972
0199c4e6 1973 arch_spin_unlock(&trace_cmdline_lock);
379cfdac
SRRH
1974
1975 return 1;
bc0c38d1
SR
1976}
1977
4c27e756 1978static void __trace_find_cmdline(int pid, char comm[])
bc0c38d1 1979{
bc0c38d1
SR
1980 unsigned map;
1981
4ca53085
SR
1982 if (!pid) {
1983 strcpy(comm, "<idle>");
1984 return;
1985 }
bc0c38d1 1986
74bf4076
SR
1987 if (WARN_ON_ONCE(pid < 0)) {
1988 strcpy(comm, "<XXX>");
1989 return;
1990 }
1991
4ca53085
SR
1992 if (pid > PID_MAX_DEFAULT) {
1993 strcpy(comm, "<...>");
1994 return;
1995 }
bc0c38d1 1996
939c7a4f 1997 map = savedcmd->map_pid_to_cmdline[pid];
50d88758 1998 if (map != NO_CMDLINE_MAP)
e09e2867 1999 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
50d88758
TG
2000 else
2001 strcpy(comm, "<...>");
4c27e756
SRRH
2002}
2003
2004void trace_find_cmdline(int pid, char comm[])
2005{
2006 preempt_disable();
2007 arch_spin_lock(&trace_cmdline_lock);
2008
2009 __trace_find_cmdline(pid, comm);
bc0c38d1 2010
0199c4e6 2011 arch_spin_unlock(&trace_cmdline_lock);
5b6045a9 2012 preempt_enable();
bc0c38d1
SR
2013}
2014
d914ba37
JF
2015int trace_find_tgid(int pid)
2016{
2017 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2018 return 0;
2019
2020 return tgid_map[pid];
2021}
2022
2023static int trace_save_tgid(struct task_struct *tsk)
2024{
bd45d34d
JF
2025 /* treat recording of idle task as a success */
2026 if (!tsk->pid)
2027 return 1;
2028
2029 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
d914ba37
JF
2030 return 0;
2031
2032 tgid_map[tsk->pid] = tsk->tgid;
2033 return 1;
2034}
2035
2036static bool tracing_record_taskinfo_skip(int flags)
2037{
2038 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2039 return true;
2040 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2041 return true;
2042 if (!__this_cpu_read(trace_taskinfo_save))
2043 return true;
2044 return false;
2045}
2046
2047/**
2048 * tracing_record_taskinfo - record the task info of a task
2049 *
 2050 * @task: task to record
 2051 * @flags: TRACE_RECORD_CMDLINE for recording comm
 2052 *         TRACE_RECORD_TGID for recording tgid
2053 */
2054void tracing_record_taskinfo(struct task_struct *task, int flags)
2055{
29b1a8ad
JF
2056 bool done;
2057
d914ba37
JF
2058 if (tracing_record_taskinfo_skip(flags))
2059 return;
29b1a8ad
JF
2060
2061 /*
2062 * Record as much task information as possible. If some fail, continue
2063 * to try to record the others.
2064 */
2065 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2066 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2067
2068 /* If recording any information failed, retry again soon. */
2069 if (!done)
d914ba37
JF
2070 return;
2071
2072 __this_cpu_write(trace_taskinfo_save, false);
2073}
2074
2075/**
2076 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2077 *
 2078 * @prev: previous task during sched_switch
 2079 * @next: next task during sched_switch
 2080 * @flags: TRACE_RECORD_CMDLINE for recording comm
 2081 *         TRACE_RECORD_TGID for recording tgid
2082 */
2083void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2084 struct task_struct *next, int flags)
bc0c38d1 2085{
29b1a8ad
JF
2086 bool done;
2087
d914ba37
JF
2088 if (tracing_record_taskinfo_skip(flags))
2089 return;
2090
29b1a8ad
JF
2091 /*
2092 * Record as much task information as possible. If some fail, continue
2093 * to try to record the others.
2094 */
2095 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2096 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2097 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2098 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
bc0c38d1 2099
29b1a8ad
JF
2100 /* If recording any information failed, retry again soon. */
2101 if (!done)
7ffbd48d
SR
2102 return;
2103
d914ba37
JF
2104 __this_cpu_write(trace_taskinfo_save, false);
2105}
2106
2107/* Helpers to record a specific task information */
2108void tracing_record_cmdline(struct task_struct *task)
2109{
2110 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2111}
2112
2113void tracing_record_tgid(struct task_struct *task)
2114{
2115 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
bc0c38d1
SR
2116}
2117
af0009fc
SRV
2118/*
2119 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2120 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2121 * simplifies those functions and keeps them in sync.
2122 */
2123enum print_line_t trace_handle_return(struct trace_seq *s)
2124{
2125 return trace_seq_has_overflowed(s) ?
2126 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2127}
2128EXPORT_SYMBOL_GPL(trace_handle_return);
2129
45dcd8b8 2130void
38697053
SR
2131tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2132 int pc)
bc0c38d1
SR
2133{
2134 struct task_struct *tsk = current;
bc0c38d1 2135
777e208d
SR
2136 entry->preempt_count = pc & 0xff;
2137 entry->pid = (tsk) ? tsk->pid : 0;
2138 entry->flags =
9244489a 2139#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2e2ca155 2140 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
9244489a
SR
2141#else
2142 TRACE_FLAG_IRQS_NOSUPPORT |
2143#endif
7e6867bf 2144 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
bc0c38d1 2145 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
c59f29cb 2146 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
e5137b50
PZ
2147 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2148 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
bc0c38d1 2149}
f413cdb8 2150EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
bc0c38d1 2151
e77405ad
SR
2152struct ring_buffer_event *
2153trace_buffer_lock_reserve(struct ring_buffer *buffer,
2154 int type,
2155 unsigned long len,
2156 unsigned long flags, int pc)
51a763dd 2157{
3e9a8aad 2158 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
0fc1b09f
SRRH
2159}
2160
2161DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2162DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2163static int trace_buffered_event_ref;
2164
2165/**
2166 * trace_buffered_event_enable - enable buffering events
2167 *
2168 * When events are being filtered, it is quicker to use a temporary
2169 * buffer to write the event data into if there's a likely chance
2170 * that it will not be committed. The discard of the ring buffer
2171 * is not as fast as committing, and is much slower than copying
2172 * a commit.
2173 *
2174 * When an event is to be filtered, allocate per cpu buffers to
2175 * write the event data into, and if the event is filtered and discarded
 2176 * it is simply dropped; otherwise, the entire event is committed
 2177 * in one shot.
2178 */
2179void trace_buffered_event_enable(void)
2180{
2181 struct ring_buffer_event *event;
2182 struct page *page;
2183 int cpu;
51a763dd 2184
0fc1b09f
SRRH
2185 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2186
2187 if (trace_buffered_event_ref++)
2188 return;
2189
2190 for_each_tracing_cpu(cpu) {
2191 page = alloc_pages_node(cpu_to_node(cpu),
2192 GFP_KERNEL | __GFP_NORETRY, 0);
2193 if (!page)
2194 goto failed;
2195
2196 event = page_address(page);
2197 memset(event, 0, sizeof(*event));
2198
2199 per_cpu(trace_buffered_event, cpu) = event;
2200
2201 preempt_disable();
2202 if (cpu == smp_processor_id() &&
2203 this_cpu_read(trace_buffered_event) !=
2204 per_cpu(trace_buffered_event, cpu))
2205 WARN_ON_ONCE(1);
2206 preempt_enable();
51a763dd
ACM
2207 }
2208
0fc1b09f
SRRH
2209 return;
2210 failed:
2211 trace_buffered_event_disable();
2212}
2213
2214static void enable_trace_buffered_event(void *data)
2215{
2216 /* Probably not needed, but do it anyway */
2217 smp_rmb();
2218 this_cpu_dec(trace_buffered_event_cnt);
2219}
2220
2221static void disable_trace_buffered_event(void *data)
2222{
2223 this_cpu_inc(trace_buffered_event_cnt);
2224}
2225
2226/**
2227 * trace_buffered_event_disable - disable buffering events
2228 *
2229 * When a filter is removed, it is faster to not use the buffered
2230 * events, and to commit directly into the ring buffer. Free up
2231 * the temp buffers when there are no more users. This requires
2232 * special synchronization with current events.
2233 */
2234void trace_buffered_event_disable(void)
2235{
2236 int cpu;
2237
2238 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2239
2240 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2241 return;
2242
2243 if (--trace_buffered_event_ref)
2244 return;
2245
2246 preempt_disable();
2247 /* For each CPU, set the buffer as used. */
2248 smp_call_function_many(tracing_buffer_mask,
2249 disable_trace_buffered_event, NULL, 1);
2250 preempt_enable();
2251
2252 /* Wait for all current users to finish */
2253 synchronize_sched();
2254
2255 for_each_tracing_cpu(cpu) {
2256 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2257 per_cpu(trace_buffered_event, cpu) = NULL;
2258 }
2259 /*
2260 * Make sure trace_buffered_event is NULL before clearing
2261 * trace_buffered_event_cnt.
2262 */
2263 smp_wmb();
2264
2265 preempt_disable();
2266 /* Do the work on each cpu */
2267 smp_call_function_many(tracing_buffer_mask,
2268 enable_trace_buffered_event, NULL, 1);
2269 preempt_enable();
51a763dd 2270}
51a763dd 2271
2c4a33ab
SRRH
2272static struct ring_buffer *temp_buffer;
2273
ccb469a1
SR
2274struct ring_buffer_event *
2275trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
7f1d2f82 2276 struct trace_event_file *trace_file,
ccb469a1
SR
2277 int type, unsigned long len,
2278 unsigned long flags, int pc)
2279{
2c4a33ab 2280 struct ring_buffer_event *entry;
0fc1b09f 2281 int val;
2c4a33ab 2282
7f1d2f82 2283 *current_rb = trace_file->tr->trace_buffer.buffer;
0fc1b09f 2284
00b41452 2285 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
0fc1b09f
SRRH
2286 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2287 (entry = this_cpu_read(trace_buffered_event))) {
2288 /* Try to use the per cpu buffer first */
2289 val = this_cpu_inc_return(trace_buffered_event_cnt);
2290 if (val == 1) {
2291 trace_event_setup(entry, type, flags, pc);
2292 entry->array[0] = len;
2293 return entry;
2294 }
2295 this_cpu_dec(trace_buffered_event_cnt);
2296 }
2297
3e9a8aad
SRRH
2298 entry = __trace_buffer_lock_reserve(*current_rb,
2299 type, len, flags, pc);
2c4a33ab
SRRH
2300 /*
2301 * If tracing is off, but we have triggers enabled
2302 * we still need to look at the event data. Use the temp_buffer
 2303	 * to store the trace event for the trigger to use. It's recursion
 2304	 * safe and will not be recorded anywhere.
2305 */
5d6ad960 2306 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2c4a33ab 2307 *current_rb = temp_buffer;
3e9a8aad
SRRH
2308 entry = __trace_buffer_lock_reserve(*current_rb,
2309 type, len, flags, pc);
2c4a33ab
SRRH
2310 }
2311 return entry;
ccb469a1
SR
2312}
2313EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2314
42391745
SRRH
2315static DEFINE_SPINLOCK(tracepoint_iter_lock);
2316static DEFINE_MUTEX(tracepoint_printk_mutex);
2317
2318static void output_printk(struct trace_event_buffer *fbuffer)
2319{
2320 struct trace_event_call *event_call;
2321 struct trace_event *event;
2322 unsigned long flags;
2323 struct trace_iterator *iter = tracepoint_print_iter;
2324
2325 /* We should never get here if iter is NULL */
2326 if (WARN_ON_ONCE(!iter))
2327 return;
2328
2329 event_call = fbuffer->trace_file->event_call;
2330 if (!event_call || !event_call->event.funcs ||
2331 !event_call->event.funcs->trace)
2332 return;
2333
2334 event = &fbuffer->trace_file->event_call->event;
2335
2336 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2337 trace_seq_init(&iter->seq);
2338 iter->ent = fbuffer->entry;
2339 event_call->event.funcs->trace(iter, 0, event);
2340 trace_seq_putc(&iter->seq, 0);
2341 printk("%s", iter->seq.buffer);
2342
2343 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2344}
2345
2346int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2347 void __user *buffer, size_t *lenp,
2348 loff_t *ppos)
2349{
2350 int save_tracepoint_printk;
2351 int ret;
2352
2353 mutex_lock(&tracepoint_printk_mutex);
2354 save_tracepoint_printk = tracepoint_printk;
2355
2356 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2357
2358 /*
2359 * This will force exiting early, as tracepoint_printk
 2360	 * is always zero when tracepoint_print_iter is not allocated
2361 */
2362 if (!tracepoint_print_iter)
2363 tracepoint_printk = 0;
2364
2365 if (save_tracepoint_printk == tracepoint_printk)
2366 goto out;
2367
2368 if (tracepoint_printk)
2369 static_key_enable(&tracepoint_printk_key.key);
2370 else
2371 static_key_disable(&tracepoint_printk_key.key);
2372
2373 out:
2374 mutex_unlock(&tracepoint_printk_mutex);
2375
2376 return ret;
2377}
2378
2379void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2380{
2381 if (static_key_false(&tracepoint_printk_key.key))
2382 output_printk(fbuffer);
2383
2384 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2385 fbuffer->event, fbuffer->entry,
2386 fbuffer->flags, fbuffer->pc);
2387}
2388EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2389
2ee5b92a
SRV
2390/*
2391 * Skip 3:
2392 *
2393 * trace_buffer_unlock_commit_regs()
2394 * trace_event_buffer_commit()
2395 * trace_event_raw_event_xxx()
13cf912b 2396 */
2ee5b92a
SRV
2397# define STACK_SKIP 3
2398
b7f0c959
SRRH
2399void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2400 struct ring_buffer *buffer,
0d5c6e1c
SR
2401 struct ring_buffer_event *event,
2402 unsigned long flags, int pc,
2403 struct pt_regs *regs)
1fd8df2c 2404{
7ffbd48d 2405 __buffer_unlock_commit(buffer, event);
1fd8df2c 2406
be54f69c 2407 /*
2ee5b92a 2408 * If regs is not set, then skip the necessary functions.
be54f69c
SRRH
2409 * Note, we can still get here via blktrace, wakeup tracer
2410 * and mmiotrace, but that's ok if they lose a function or
2ee5b92a 2411 * two. They are not that meaningful.
be54f69c 2412 */
2ee5b92a 2413 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
1fd8df2c
MH
2414 ftrace_trace_userstack(buffer, flags, pc);
2415}
1fd8df2c 2416
52ffabe3
SRRH
2417/*
2418 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2419 */
2420void
2421trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2422 struct ring_buffer_event *event)
2423{
2424 __buffer_unlock_commit(buffer, event);
2425}
2426
478409dd
CZ
2427static void
2428trace_process_export(struct trace_export *export,
2429 struct ring_buffer_event *event)
2430{
2431 struct trace_entry *entry;
2432 unsigned int size = 0;
2433
2434 entry = ring_buffer_event_data(event);
2435 size = ring_buffer_event_length(event);
a773d419 2436 export->write(export, entry, size);
478409dd
CZ
2437}
2438
2439static DEFINE_MUTEX(ftrace_export_lock);
2440
2441static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2442
2443static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2444
2445static inline void ftrace_exports_enable(void)
2446{
2447 static_branch_enable(&ftrace_exports_enabled);
2448}
2449
2450static inline void ftrace_exports_disable(void)
2451{
2452 static_branch_disable(&ftrace_exports_enabled);
2453}
2454
1cce377d 2455static void ftrace_exports(struct ring_buffer_event *event)
478409dd
CZ
2456{
2457 struct trace_export *export;
2458
2459 preempt_disable_notrace();
2460
2461 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2462 while (export) {
2463 trace_process_export(export, event);
2464 export = rcu_dereference_raw_notrace(export->next);
2465 }
2466
2467 preempt_enable_notrace();
2468}
2469
2470static inline void
2471add_trace_export(struct trace_export **list, struct trace_export *export)
2472{
2473 rcu_assign_pointer(export->next, *list);
2474 /*
2475 * We are entering export into the list but another
2476 * CPU might be walking that list. We need to make sure
2477 * the export->next pointer is valid before another CPU sees
 2478	 * the export pointer inserted into the list.
2479 */
2480 rcu_assign_pointer(*list, export);
2481}
2482
2483static inline int
2484rm_trace_export(struct trace_export **list, struct trace_export *export)
2485{
2486 struct trace_export **p;
2487
2488 for (p = list; *p != NULL; p = &(*p)->next)
2489 if (*p == export)
2490 break;
2491
2492 if (*p != export)
2493 return -1;
2494
2495 rcu_assign_pointer(*p, (*p)->next);
2496
2497 return 0;
2498}
2499
2500static inline void
2501add_ftrace_export(struct trace_export **list, struct trace_export *export)
2502{
2503 if (*list == NULL)
2504 ftrace_exports_enable();
2505
2506 add_trace_export(list, export);
2507}
2508
2509static inline int
2510rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2511{
2512 int ret;
2513
2514 ret = rm_trace_export(list, export);
2515 if (*list == NULL)
2516 ftrace_exports_disable();
2517
2518 return ret;
2519}
2520
2521int register_ftrace_export(struct trace_export *export)
2522{
2523 if (WARN_ON_ONCE(!export->write))
2524 return -1;
2525
2526 mutex_lock(&ftrace_export_lock);
2527
2528 add_ftrace_export(&ftrace_exports_list, export);
2529
2530 mutex_unlock(&ftrace_export_lock);
2531
2532 return 0;
2533}
2534EXPORT_SYMBOL_GPL(register_ftrace_export);
2535
2536int unregister_ftrace_export(struct trace_export *export)
2537{
2538 int ret;
2539
2540 mutex_lock(&ftrace_export_lock);
2541
2542 ret = rm_ftrace_export(&ftrace_exports_list, export);
2543
2544 mutex_unlock(&ftrace_export_lock);
2545
2546 return ret;
2547}
2548EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2549
e309b41d 2550void
7be42151 2551trace_function(struct trace_array *tr,
38697053
SR
2552 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2553 int pc)
bc0c38d1 2554{
2425bcb9 2555 struct trace_event_call *call = &event_function;
12883efb 2556 struct ring_buffer *buffer = tr->trace_buffer.buffer;
3928a8a2 2557 struct ring_buffer_event *event;
777e208d 2558 struct ftrace_entry *entry;
bc0c38d1 2559
3e9a8aad
SRRH
2560 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2561 flags, pc);
3928a8a2
SR
2562 if (!event)
2563 return;
2564 entry = ring_buffer_event_data(event);
777e208d
SR
2565 entry->ip = ip;
2566 entry->parent_ip = parent_ip;
e1112b4d 2567
478409dd
CZ
2568 if (!call_filter_check_discard(call, entry, buffer, event)) {
2569 if (static_branch_unlikely(&ftrace_exports_enabled))
2570 ftrace_exports(event);
7ffbd48d 2571 __buffer_unlock_commit(buffer, event);
478409dd 2572 }
bc0c38d1
SR
2573}
2574
c0a0d0d3 2575#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
2576
2577#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2578struct ftrace_stack {
2579 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2580};
2581
2582static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2583static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2584
e77405ad 2585static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 2586 unsigned long flags,
1fd8df2c 2587 int skip, int pc, struct pt_regs *regs)
86387f7e 2588{
2425bcb9 2589 struct trace_event_call *call = &event_kernel_stack;
3928a8a2 2590 struct ring_buffer_event *event;
777e208d 2591 struct stack_entry *entry;
86387f7e 2592 struct stack_trace trace;
4a9bd3f1
SR
2593 int use_stack;
2594 int size = FTRACE_STACK_ENTRIES;
2595
2596 trace.nr_entries = 0;
2597 trace.skip = skip;
2598
be54f69c 2599 /*
2ee5b92a 2600 * Add one, for this function and the call to save_stack_trace()
be54f69c
SRRH
2601 * If regs is set, then these functions will not be in the way.
2602 */
2ee5b92a 2603#ifndef CONFIG_UNWINDER_ORC
be54f69c 2604 if (!regs)
2ee5b92a
SRV
2605 trace.skip++;
2606#endif
be54f69c 2607
4a9bd3f1
SR
2608 /*
2609 * Since events can happen in NMIs there's no safe way to
2610 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2611 * or NMI comes in, it will just have to use the default
 2612	 * FTRACE_STACK_ENTRIES.
2613 */
2614 preempt_disable_notrace();
2615
82146529 2616 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
2617 /*
2618 * We don't need any atomic variables, just a barrier.
2619 * If an interrupt comes in, we don't care, because it would
2620 * have exited and put the counter back to what we want.
2621 * We just need a barrier to keep gcc from moving things
2622 * around.
2623 */
2624 barrier();
2625 if (use_stack == 1) {
bdffd893 2626 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
2627 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2628
2629 if (regs)
2630 save_stack_trace_regs(regs, &trace);
2631 else
2632 save_stack_trace(&trace);
2633
2634 if (trace.nr_entries > size)
2635 size = trace.nr_entries;
2636 } else
2637 /* From now on, use_stack is a boolean */
2638 use_stack = 0;
2639
2640 size *= sizeof(unsigned long);
86387f7e 2641
3e9a8aad
SRRH
2642 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2643 sizeof(*entry) + size, flags, pc);
3928a8a2 2644 if (!event)
4a9bd3f1
SR
2645 goto out;
2646 entry = ring_buffer_event_data(event);
86387f7e 2647
4a9bd3f1
SR
2648 memset(&entry->caller, 0, size);
2649
2650 if (use_stack)
2651 memcpy(&entry->caller, trace.entries,
2652 trace.nr_entries * sizeof(unsigned long));
2653 else {
2654 trace.max_entries = FTRACE_STACK_ENTRIES;
2655 trace.entries = entry->caller;
2656 if (regs)
2657 save_stack_trace_regs(regs, &trace);
2658 else
2659 save_stack_trace(&trace);
2660 }
2661
2662 entry->size = trace.nr_entries;
86387f7e 2663
f306cc82 2664 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2665 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
2666
2667 out:
2668 /* Again, don't let gcc optimize things here */
2669 barrier();
82146529 2670 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
2671 preempt_enable_notrace();
2672
f0a920d5
IM
2673}
2674
2d34f489
SRRH
2675static inline void ftrace_trace_stack(struct trace_array *tr,
2676 struct ring_buffer *buffer,
73dddbb5
SRRH
2677 unsigned long flags,
2678 int skip, int pc, struct pt_regs *regs)
53614991 2679{
2d34f489 2680 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
53614991
SR
2681 return;
2682
73dddbb5 2683 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
2684}
2685
c0a0d0d3
FW
2686void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2687 int pc)
38697053 2688{
a33d7d94
SRV
2689 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2690
2691 if (rcu_is_watching()) {
2692 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2693 return;
2694 }
2695
2696 /*
2697 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2698 * but if the above rcu_is_watching() failed, then the NMI
2699 * triggered someplace critical, and rcu_irq_enter() should
2700 * not be called from NMI.
2701 */
2702 if (unlikely(in_nmi()))
2703 return;
2704
a33d7d94
SRV
2705 rcu_irq_enter_irqson();
2706 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2707 rcu_irq_exit_irqson();
38697053
SR
2708}
2709
03889384
SR
2710/**
2711 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 2712 * @skip: Number of functions to skip (helper handlers)
03889384 2713 */
c142be8e 2714void trace_dump_stack(int skip)
03889384
SR
2715{
2716 unsigned long flags;
2717
2718 if (tracing_disabled || tracing_selftest_running)
e36c5458 2719 return;
03889384
SR
2720
2721 local_save_flags(flags);
2722
2ee5b92a
SRV
2723#ifndef CONFIG_UNWINDER_ORC
2724 /* Skip 1 to skip this function. */
2725 skip++;
2726#endif
c142be8e
SRRH
2727 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2728 flags, skip, preempt_count(), NULL);
03889384 2729}
da387e5c 2730EXPORT_SYMBOL_GPL(trace_dump_stack);
03889384 2731
91e86e56
SR
2732static DEFINE_PER_CPU(int, user_stack_count);
2733
e77405ad
SR
2734void
2735ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 2736{
2425bcb9 2737 struct trace_event_call *call = &event_user_stack;
8d7c6a96 2738 struct ring_buffer_event *event;
02b67518
TE
2739 struct userstack_entry *entry;
2740 struct stack_trace trace;
02b67518 2741
983f938a 2742 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
02b67518
TE
2743 return;
2744
b6345879
SR
2745 /*
2746 * NMIs can not handle page faults, even with fix ups.
2747 * The save user stack can (and often does) fault.
2748 */
2749 if (unlikely(in_nmi()))
2750 return;
02b67518 2751
91e86e56
SR
2752 /*
2753 * prevent recursion, since the user stack tracing may
2754 * trigger other kernel events.
2755 */
2756 preempt_disable();
2757 if (__this_cpu_read(user_stack_count))
2758 goto out;
2759
2760 __this_cpu_inc(user_stack_count);
2761
3e9a8aad
SRRH
2762 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2763 sizeof(*entry), flags, pc);
02b67518 2764 if (!event)
1dbd1951 2765 goto out_drop_count;
02b67518 2766 entry = ring_buffer_event_data(event);
02b67518 2767
48659d31 2768 entry->tgid = current->tgid;
02b67518
TE
2769 memset(&entry->caller, 0, sizeof(entry->caller));
2770
2771 trace.nr_entries = 0;
2772 trace.max_entries = FTRACE_STACK_ENTRIES;
2773 trace.skip = 0;
2774 trace.entries = entry->caller;
2775
2776 save_stack_trace_user(&trace);
f306cc82 2777 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2778 __buffer_unlock_commit(buffer, event);
91e86e56 2779
1dbd1951 2780 out_drop_count:
91e86e56 2781 __this_cpu_dec(user_stack_count);
91e86e56
SR
2782 out:
2783 preempt_enable();
02b67518
TE
2784}
2785
4fd27358
HE
2786#ifdef UNUSED
2787static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 2788{
7be42151 2789 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 2790}
4fd27358 2791#endif /* UNUSED */
02b67518 2792
c0a0d0d3
FW
2793#endif /* CONFIG_STACKTRACE */
2794
07d777fe
SR
2795/* created for use with alloc_percpu */
2796struct trace_buffer_struct {
e2ace001
AL
2797 int nesting;
2798 char buffer[4][TRACE_BUF_SIZE];
07d777fe
SR
2799};
2800
2801static struct trace_buffer_struct *trace_percpu_buffer;
07d777fe
SR
2802
2803/*
e2ace001
AL
 2804 * This allows for lockless recording. If we're nested too deeply, then
2805 * this returns NULL.
07d777fe
SR
2806 */
2807static char *get_trace_buf(void)
2808{
e2ace001 2809 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
07d777fe 2810
e2ace001 2811 if (!buffer || buffer->nesting >= 4)
07d777fe
SR
2812 return NULL;
2813
3d9622c1
SRV
2814 buffer->nesting++;
2815
2816 /* Interrupts must see nesting incremented before we use the buffer */
2817 barrier();
2818 return &buffer->buffer[buffer->nesting][0];
e2ace001
AL
2819}
2820
2821static void put_trace_buf(void)
2822{
3d9622c1
SRV
2823 /* Don't let the decrement of nesting leak before this */
2824 barrier();
e2ace001 2825 this_cpu_dec(trace_percpu_buffer->nesting);
07d777fe
SR
2826}
2827
2828static int alloc_percpu_trace_buffer(void)
2829{
2830 struct trace_buffer_struct *buffers;
07d777fe
SR
2831
2832 buffers = alloc_percpu(struct trace_buffer_struct);
e2ace001
AL
2833 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2834 return -ENOMEM;
07d777fe
SR
2835
2836 trace_percpu_buffer = buffers;
07d777fe 2837 return 0;
07d777fe
SR
2838}
2839
81698831
SR
2840static int buffers_allocated;
2841
07d777fe
SR
2842void trace_printk_init_buffers(void)
2843{
07d777fe
SR
2844 if (buffers_allocated)
2845 return;
2846
2847 if (alloc_percpu_trace_buffer())
2848 return;
2849
2184db46
SR
2850 /* trace_printk() is for debug use only. Don't use it in production. */
2851
a395d6a7
JP
2852 pr_warn("\n");
2853 pr_warn("**********************************************************\n");
2854 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2855 pr_warn("** **\n");
2856 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2857 pr_warn("** **\n");
2858 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2859 pr_warn("** unsafe for production use. **\n");
2860 pr_warn("** **\n");
2861 pr_warn("** If you see this message and you are not debugging **\n");
2862 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2863 pr_warn("** **\n");
2864 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2865 pr_warn("**********************************************************\n");
07d777fe 2866
b382ede6
SR
2867 /* Expand the buffers to set size */
2868 tracing_update_buffers();
2869
07d777fe 2870 buffers_allocated = 1;
81698831
SR
2871
2872 /*
2873 * trace_printk_init_buffers() can be called by modules.
2874 * If that happens, then we need to start cmdline recording
2875 * directly here. If the global_trace.buffer is already
2876 * allocated here, then this was called by module code.
2877 */
12883efb 2878 if (global_trace.trace_buffer.buffer)
81698831
SR
2879 tracing_start_cmdline_record();
2880}
2881
2882void trace_printk_start_comm(void)
2883{
2884 /* Start tracing comms if trace printk is set */
2885 if (!buffers_allocated)
2886 return;
2887 tracing_start_cmdline_record();
2888}
2889
2890static void trace_printk_start_stop_comm(int enabled)
2891{
2892 if (!buffers_allocated)
2893 return;
2894
2895 if (enabled)
2896 tracing_start_cmdline_record();
2897 else
2898 tracing_stop_cmdline_record();
07d777fe
SR
2899}
2900
769b0441 2901/**
48ead020 2902 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
 2903 * @ip: ip of the caller, @fmt: binary printk format, @args: va_list of args
 2904 */
40ce74f1 2905int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2906{
2425bcb9 2907 struct trace_event_call *call = &event_bprint;
769b0441 2908 struct ring_buffer_event *event;
e77405ad 2909 struct ring_buffer *buffer;
769b0441 2910 struct trace_array *tr = &global_trace;
48ead020 2911 struct bprint_entry *entry;
769b0441 2912 unsigned long flags;
07d777fe
SR
2913 char *tbuffer;
2914 int len = 0, size, pc;
769b0441
FW
2915
2916 if (unlikely(tracing_selftest_running || tracing_disabled))
2917 return 0;
2918
2919 /* Don't pollute graph traces with trace_vprintk internals */
2920 pause_graph_tracing();
2921
2922 pc = preempt_count();
5168ae50 2923 preempt_disable_notrace();
769b0441 2924
07d777fe
SR
2925 tbuffer = get_trace_buf();
2926 if (!tbuffer) {
2927 len = 0;
e2ace001 2928 goto out_nobuffer;
07d777fe 2929 }
769b0441 2930
07d777fe 2931 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2932
07d777fe
SR
2933 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2934 goto out;
769b0441 2935
07d777fe 2936 local_save_flags(flags);
769b0441 2937 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2938 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
2939 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2940 flags, pc);
769b0441 2941 if (!event)
07d777fe 2942 goto out;
769b0441
FW
2943 entry = ring_buffer_event_data(event);
2944 entry->ip = ip;
769b0441
FW
2945 entry->fmt = fmt;
2946
07d777fe 2947 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2948 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2949 __buffer_unlock_commit(buffer, event);
2d34f489 2950 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
d931369b 2951 }
769b0441 2952
769b0441 2953out:
e2ace001
AL
2954 put_trace_buf();
2955
2956out_nobuffer:
5168ae50 2957 preempt_enable_notrace();
769b0441
FW
2958 unpause_graph_tracing();
2959
2960 return len;
2961}
48ead020
FW
2962EXPORT_SYMBOL_GPL(trace_vbprintk);
2963
26b68dd2 2964__printf(3, 0)
12883efb
SRRH
2965static int
2966__trace_array_vprintk(struct ring_buffer *buffer,
2967 unsigned long ip, const char *fmt, va_list args)
48ead020 2968{
2425bcb9 2969 struct trace_event_call *call = &event_print;
48ead020 2970 struct ring_buffer_event *event;
07d777fe 2971 int len = 0, size, pc;
48ead020 2972 struct print_entry *entry;
07d777fe
SR
2973 unsigned long flags;
2974 char *tbuffer;
48ead020
FW
2975
2976 if (tracing_disabled || tracing_selftest_running)
2977 return 0;
2978
07d777fe
SR
2979 /* Don't pollute graph traces with trace_vprintk internals */
2980 pause_graph_tracing();
2981
48ead020
FW
2982 pc = preempt_count();
2983 preempt_disable_notrace();
48ead020 2984
07d777fe
SR
2985
2986 tbuffer = get_trace_buf();
2987 if (!tbuffer) {
2988 len = 0;
e2ace001 2989 goto out_nobuffer;
07d777fe 2990 }
48ead020 2991
3558a5ac 2992 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2993
07d777fe 2994 local_save_flags(flags);
48ead020 2995 size = sizeof(*entry) + len + 1;
3e9a8aad
SRRH
2996 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2997 flags, pc);
48ead020 2998 if (!event)
07d777fe 2999 goto out;
48ead020 3000 entry = ring_buffer_event_data(event);
c13d2f7c 3001 entry->ip = ip;
48ead020 3002
3558a5ac 3003 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 3004 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 3005 __buffer_unlock_commit(buffer, event);
2d34f489 3006 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
d931369b 3007 }
e2ace001
AL
3008
3009out:
3010 put_trace_buf();
3011
3012out_nobuffer:
48ead020 3013 preempt_enable_notrace();
07d777fe 3014 unpause_graph_tracing();
48ead020
FW
3015
3016 return len;
3017}
659372d3 3018
26b68dd2 3019__printf(3, 0)
12883efb
SRRH
3020int trace_array_vprintk(struct trace_array *tr,
3021 unsigned long ip, const char *fmt, va_list args)
3022{
3023 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3024}
3025
26b68dd2 3026__printf(3, 0)
12883efb
SRRH
3027int trace_array_printk(struct trace_array *tr,
3028 unsigned long ip, const char *fmt, ...)
3029{
3030 int ret;
3031 va_list ap;
3032
983f938a 3033 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
3034 return 0;
3035
3036 va_start(ap, fmt);
3037 ret = trace_array_vprintk(tr, ip, fmt, ap);
3038 va_end(ap);
3039 return ret;
3040}
3041
26b68dd2 3042__printf(3, 4)
12883efb
SRRH
3043int trace_array_printk_buf(struct ring_buffer *buffer,
3044 unsigned long ip, const char *fmt, ...)
3045{
3046 int ret;
3047 va_list ap;
3048
983f938a 3049 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
3050 return 0;
3051
3052 va_start(ap, fmt);
3053 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3054 va_end(ap);
3055 return ret;
3056}
3057
26b68dd2 3058__printf(2, 0)
659372d3
SR
3059int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3060{
a813a159 3061 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 3062}
769b0441
FW
3063EXPORT_SYMBOL_GPL(trace_vprintk);
3064
e2ac8ef5 3065static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 3066{
6d158a81
SR
3067 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3068
5a90f577 3069 iter->idx++;
6d158a81
SR
3070 if (buf_iter)
3071 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
3072}
3073
e309b41d 3074static struct trace_entry *
bc21b478
SR
3075peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3076 unsigned long *lost_events)
dd0e545f 3077{
3928a8a2 3078 struct ring_buffer_event *event;
6d158a81 3079 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 3080
d769041f
SR
3081 if (buf_iter)
3082 event = ring_buffer_iter_peek(buf_iter, ts);
3083 else
12883efb 3084 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 3085 lost_events);
d769041f 3086
4a9bd3f1
SR
3087 if (event) {
3088 iter->ent_size = ring_buffer_event_length(event);
3089 return ring_buffer_event_data(event);
3090 }
3091 iter->ent_size = 0;
3092 return NULL;
dd0e545f 3093}
d769041f 3094
dd0e545f 3095static struct trace_entry *
bc21b478
SR
3096__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3097 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 3098{
12883efb 3099 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 3100 struct trace_entry *ent, *next = NULL;
aa27497c 3101 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 3102 int cpu_file = iter->cpu_file;
3928a8a2 3103 u64 next_ts = 0, ts;
bc0c38d1 3104 int next_cpu = -1;
12b5da34 3105 int next_size = 0;
bc0c38d1
SR
3106 int cpu;
3107
b04cc6b1
FW
3108 /*
3109 * If we are in a per_cpu trace file, don't bother by iterating over
3110 * all cpu and peek directly.
3111 */
ae3b5093 3112 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
3113 if (ring_buffer_empty_cpu(buffer, cpu_file))
3114 return NULL;
bc21b478 3115 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
3116 if (ent_cpu)
3117 *ent_cpu = cpu_file;
3118
3119 return ent;
3120 }
3121
ab46428c 3122 for_each_tracing_cpu(cpu) {
dd0e545f 3123
3928a8a2
SR
3124 if (ring_buffer_empty_cpu(buffer, cpu))
3125 continue;
dd0e545f 3126
bc21b478 3127 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 3128
cdd31cd2
IM
3129 /*
3130 * Pick the entry with the smallest timestamp:
3131 */
3928a8a2 3132 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
3133 next = ent;
3134 next_cpu = cpu;
3928a8a2 3135 next_ts = ts;
bc21b478 3136 next_lost = lost_events;
12b5da34 3137 next_size = iter->ent_size;
bc0c38d1
SR
3138 }
3139 }
3140
12b5da34
SR
3141 iter->ent_size = next_size;
3142
bc0c38d1
SR
3143 if (ent_cpu)
3144 *ent_cpu = next_cpu;
3145
3928a8a2
SR
3146 if (ent_ts)
3147 *ent_ts = next_ts;
3148
bc21b478
SR
3149 if (missing_events)
3150 *missing_events = next_lost;
3151
bc0c38d1
SR
3152 return next;
3153}
3154
dd0e545f 3155/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
3156struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3157 int *ent_cpu, u64 *ent_ts)
bc0c38d1 3158{
bc21b478 3159 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
3160}
3161
3162/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 3163void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 3164{
bc21b478
SR
3165 iter->ent = __find_next_entry(iter, &iter->cpu,
3166 &iter->lost_events, &iter->ts);
dd0e545f 3167
3928a8a2 3168 if (iter->ent)
e2ac8ef5 3169 trace_iterator_increment(iter);
dd0e545f 3170
3928a8a2 3171 return iter->ent ? iter : NULL;
b3806b43 3172}
bc0c38d1 3173
e309b41d 3174static void trace_consume(struct trace_iterator *iter)
b3806b43 3175{
12883efb 3176 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 3177 &iter->lost_events);
bc0c38d1
SR
3178}
3179
e309b41d 3180static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
3181{
3182 struct trace_iterator *iter = m->private;
bc0c38d1 3183 int i = (int)*pos;
4e3c3333 3184 void *ent;
bc0c38d1 3185
a63ce5b3
SR
3186 WARN_ON_ONCE(iter->leftover);
3187
bc0c38d1
SR
3188 (*pos)++;
3189
3190 /* can't go backwards */
3191 if (iter->idx > i)
3192 return NULL;
3193
3194 if (iter->idx < 0)
955b61e5 3195 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3196 else
3197 ent = iter;
3198
3199 while (ent && iter->idx < i)
955b61e5 3200 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3201
3202 iter->pos = *pos;
3203
bc0c38d1
SR
3204 return ent;
3205}
3206
955b61e5 3207void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 3208{
2f26ebd5
SR
3209 struct ring_buffer_event *event;
3210 struct ring_buffer_iter *buf_iter;
3211 unsigned long entries = 0;
3212 u64 ts;
3213
12883efb 3214 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 3215
6d158a81
SR
3216 buf_iter = trace_buffer_iter(iter, cpu);
3217 if (!buf_iter)
2f26ebd5
SR
3218 return;
3219
2f26ebd5
SR
3220 ring_buffer_iter_reset(buf_iter);
3221
3222 /*
3223 * We could have the case with the max latency tracers
3224 * that a reset never took place on a cpu. This is evident
3225 * by the timestamp being before the start of the buffer.
3226 */
3227 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 3228 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
3229 break;
3230 entries++;
3231 ring_buffer_read(buf_iter, NULL);
3232 }
3233
12883efb 3234 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
3235}
3236
d7350c3f 3237/*
d7350c3f
FW
3238 * The current tracer is copied to avoid a global locking
3239 * all around.
3240 */
bc0c38d1
SR
3241static void *s_start(struct seq_file *m, loff_t *pos)
3242{
3243 struct trace_iterator *iter = m->private;
2b6080f2 3244 struct trace_array *tr = iter->tr;
b04cc6b1 3245 int cpu_file = iter->cpu_file;
bc0c38d1
SR
3246 void *p = NULL;
3247 loff_t l = 0;
3928a8a2 3248 int cpu;
bc0c38d1 3249
2fd196ec
HT
3250 /*
3251 * copy the tracer to avoid using a global lock all around.
3252 * iter->trace is a copy of current_trace, the pointer to the
3253 * name may be used instead of a strcmp(), as iter->trace->name
3254 * will point to the same string as current_trace->name.
3255 */
bc0c38d1 3256 mutex_lock(&trace_types_lock);
2b6080f2
SR
3257 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3258 *iter->trace = *tr->current_trace;
d7350c3f 3259 mutex_unlock(&trace_types_lock);
bc0c38d1 3260
12883efb 3261#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3262 if (iter->snapshot && iter->trace->use_max_tr)
3263 return ERR_PTR(-EBUSY);
12883efb 3264#endif
debdd57f
HT
3265
3266 if (!iter->snapshot)
d914ba37 3267 atomic_inc(&trace_record_taskinfo_disabled);
bc0c38d1 3268
bc0c38d1
SR
3269 if (*pos != iter->pos) {
3270 iter->ent = NULL;
3271 iter->cpu = 0;
3272 iter->idx = -1;
3273
ae3b5093 3274 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3275 for_each_tracing_cpu(cpu)
2f26ebd5 3276 tracing_iter_reset(iter, cpu);
b04cc6b1 3277 } else
2f26ebd5 3278 tracing_iter_reset(iter, cpu_file);
bc0c38d1 3279
ac91d854 3280 iter->leftover = 0;
bc0c38d1
SR
3281 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3282 ;
3283
3284 } else {
a63ce5b3
SR
3285 /*
3286 * If we overflowed the seq_file before, then we want
3287 * to just reuse the trace_seq buffer again.
3288 */
3289 if (iter->leftover)
3290 p = iter;
3291 else {
3292 l = *pos - 1;
3293 p = s_next(m, p, &l);
3294 }
bc0c38d1
SR
3295 }
3296
4f535968 3297 trace_event_read_lock();
7e53bd42 3298 trace_access_lock(cpu_file);
bc0c38d1
SR
3299 return p;
3300}
3301
3302static void s_stop(struct seq_file *m, void *p)
3303{
7e53bd42
LJ
3304 struct trace_iterator *iter = m->private;
3305
12883efb 3306#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3307 if (iter->snapshot && iter->trace->use_max_tr)
3308 return;
12883efb 3309#endif
debdd57f
HT
3310
3311 if (!iter->snapshot)
d914ba37 3312 atomic_dec(&trace_record_taskinfo_disabled);
12883efb 3313
7e53bd42 3314 trace_access_unlock(iter->cpu_file);
4f535968 3315 trace_event_read_unlock();
bc0c38d1
SR
3316}
3317
39eaf7ef 3318static void
12883efb
SRRH
3319get_total_entries(struct trace_buffer *buf,
3320 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
3321{
3322 unsigned long count;
3323 int cpu;
3324
3325 *total = 0;
3326 *entries = 0;
3327
3328 for_each_tracing_cpu(cpu) {
12883efb 3329 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
3330 /*
3331 * If this buffer has skipped entries, then we hold all
3332 * entries for the trace and we need to ignore the
3333 * ones before the time stamp.
3334 */
12883efb
SRRH
3335 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3336 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
3337 /* total is the same as the entries */
3338 *total += count;
3339 } else
3340 *total += count +
12883efb 3341 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
3342 *entries += count;
3343 }
3344}
3345
e309b41d 3346static void print_lat_help_header(struct seq_file *m)
bc0c38d1 3347{
d79ac28f
RV
3348 seq_puts(m, "# _------=> CPU# \n"
3349 "# / _-----=> irqs-off \n"
3350 "# | / _----=> need-resched \n"
3351 "# || / _---=> hardirq/softirq \n"
3352 "# ||| / _--=> preempt-depth \n"
3353 "# |||| / delay \n"
3354 "# cmd pid ||||| time | caller \n"
3355 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
3356}
3357
12883efb 3358static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 3359{
39eaf7ef
SR
3360 unsigned long total;
3361 unsigned long entries;
3362
12883efb 3363 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
3364 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3365 entries, total, num_online_cpus());
3366 seq_puts(m, "#\n");
3367}
3368
441dae8f
JF
3369static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3370 unsigned int flags)
39eaf7ef 3371{
441dae8f
JF
3372 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3373
12883efb 3374 print_event_info(buf, m);
441dae8f 3375
f8494fa3
JFG
3376 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3377 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
bc0c38d1
SR
3378}
3379
441dae8f
JF
3380static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3381 unsigned int flags)
77271ce4 3382{
441dae8f 3383 bool tgid = flags & TRACE_ITER_RECORD_TGID;
b11fb737
SRV
3384 const char tgid_space[] = " ";
3385 const char space[] = " ";
3386
3387 seq_printf(m, "# %s _-----=> irqs-off\n",
3388 tgid ? tgid_space : space);
3389 seq_printf(m, "# %s / _----=> need-resched\n",
3390 tgid ? tgid_space : space);
3391 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3392 tgid ? tgid_space : space);
3393 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3394 tgid ? tgid_space : space);
3395 seq_printf(m, "# %s||| / delay\n",
3396 tgid ? tgid_space : space);
f8494fa3 3397 seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
b11fb737 3398 tgid ? " TGID " : space);
f8494fa3 3399 seq_printf(m, "# | | %s | |||| | |\n",
b11fb737 3400 tgid ? " | " : space);
77271ce4 3401}
bc0c38d1 3402
62b915f1 3403void
bc0c38d1
SR
3404print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3405{
983f938a 3406 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
3407 struct trace_buffer *buf = iter->trace_buffer;
3408 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 3409 struct tracer *type = iter->trace;
39eaf7ef
SR
3410 unsigned long entries;
3411 unsigned long total;
bc0c38d1
SR
3412 const char *name = "preemption";
3413
d840f718 3414 name = type->name;
bc0c38d1 3415
12883efb 3416 get_total_entries(buf, &total, &entries);
bc0c38d1 3417
888b55dc 3418 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 3419 name, UTS_RELEASE);
888b55dc 3420 seq_puts(m, "# -----------------------------------"
bc0c38d1 3421 "---------------------------------\n");
888b55dc 3422 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 3423 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 3424 nsecs_to_usecs(data->saved_latency),
bc0c38d1 3425 entries,
4c11d7ae 3426 total,
12883efb 3427 buf->cpu,
bc0c38d1
SR
3428#if defined(CONFIG_PREEMPT_NONE)
3429 "server",
3430#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3431 "desktop",
b5c21b45 3432#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
3433 "preempt",
3434#else
3435 "unknown",
3436#endif
3437 /* These are reserved for later use */
3438 0, 0, 0, 0);
3439#ifdef CONFIG_SMP
3440 seq_printf(m, " #P:%d)\n", num_online_cpus());
3441#else
3442 seq_puts(m, ")\n");
3443#endif
888b55dc
KM
3444 seq_puts(m, "# -----------------\n");
3445 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 3446 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
3447 data->comm, data->pid,
3448 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 3449 data->policy, data->rt_priority);
888b55dc 3450 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
3451
3452 if (data->critical_start) {
888b55dc 3453 seq_puts(m, "# => started at: ");
214023c3
SR
3454 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3455 trace_print_seq(m, &iter->seq);
888b55dc 3456 seq_puts(m, "\n# => ended at: ");
214023c3
SR
3457 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3458 trace_print_seq(m, &iter->seq);
8248ac05 3459 seq_puts(m, "\n#\n");
bc0c38d1
SR
3460 }
3461
888b55dc 3462 seq_puts(m, "#\n");
bc0c38d1
SR
3463}
3464
a309720c
SR
3465static void test_cpu_buff_start(struct trace_iterator *iter)
3466{
3467 struct trace_seq *s = &iter->seq;
983f938a 3468 struct trace_array *tr = iter->tr;
a309720c 3469
983f938a 3470 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
12ef7d44
SR
3471 return;
3472
3473 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3474 return;
3475
4dbbe2d8
MK
3476 if (cpumask_available(iter->started) &&
3477 cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
3478 return;
3479
12883efb 3480 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
3481 return;
3482
4dbbe2d8 3483 if (cpumask_available(iter->started))
919cd979 3484 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
3485
3486 /* Don't print started cpu buffer for the first entry of the trace */
3487 if (iter->idx > 1)
3488 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3489 iter->cpu);
a309720c
SR
3490}
3491
2c4f035f 3492static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 3493{
983f938a 3494 struct trace_array *tr = iter->tr;
214023c3 3495 struct trace_seq *s = &iter->seq;
983f938a 3496 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 3497 struct trace_entry *entry;
f633cef0 3498 struct trace_event *event;
bc0c38d1 3499
4e3c3333 3500 entry = iter->ent;
dd0e545f 3501
a309720c
SR
3502 test_cpu_buff_start(iter);
3503
c4a8e8be 3504 event = ftrace_find_event(entry->type);
bc0c38d1 3505
983f938a 3506 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3507 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3508 trace_print_lat_context(iter);
3509 else
3510 trace_print_context(iter);
c4a8e8be 3511 }
bc0c38d1 3512
19a7fe20
SRRH
3513 if (trace_seq_has_overflowed(s))
3514 return TRACE_TYPE_PARTIAL_LINE;
3515
268ccda0 3516 if (event)
a9a57763 3517 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 3518
19a7fe20 3519 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 3520
19a7fe20 3521 return trace_handle_return(s);
bc0c38d1
SR
3522}
3523
2c4f035f 3524static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3 3525{
983f938a 3526 struct trace_array *tr = iter->tr;
f9896bf3
IM
3527 struct trace_seq *s = &iter->seq;
3528 struct trace_entry *entry;
f633cef0 3529 struct trace_event *event;
f9896bf3
IM
3530
3531 entry = iter->ent;
dd0e545f 3532
983f938a 3533 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
19a7fe20
SRRH
3534 trace_seq_printf(s, "%d %d %llu ",
3535 entry->pid, iter->cpu, iter->ts);
3536
3537 if (trace_seq_has_overflowed(s))
3538 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 3539
f633cef0 3540 event = ftrace_find_event(entry->type);
268ccda0 3541 if (event)
a9a57763 3542 return event->funcs->raw(iter, 0, event);
d9793bd8 3543
19a7fe20 3544 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 3545
19a7fe20 3546 return trace_handle_return(s);
f9896bf3
IM
3547}
3548
2c4f035f 3549static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec 3550{
983f938a 3551 struct trace_array *tr = iter->tr;
5e3ca0ec
IM
3552 struct trace_seq *s = &iter->seq;
3553 unsigned char newline = '\n';
3554 struct trace_entry *entry;
f633cef0 3555 struct trace_event *event;
5e3ca0ec
IM
3556
3557 entry = iter->ent;
dd0e545f 3558
983f938a 3559 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3560 SEQ_PUT_HEX_FIELD(s, entry->pid);
3561 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3562 SEQ_PUT_HEX_FIELD(s, iter->ts);
3563 if (trace_seq_has_overflowed(s))
3564 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3565 }
5e3ca0ec 3566
f633cef0 3567 event = ftrace_find_event(entry->type);
268ccda0 3568 if (event) {
a9a57763 3569 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
3570 if (ret != TRACE_TYPE_HANDLED)
3571 return ret;
3572 }
7104f300 3573
19a7fe20 3574 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 3575
19a7fe20 3576 return trace_handle_return(s);
5e3ca0ec
IM
3577}
3578
2c4f035f 3579static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa 3580{
983f938a 3581 struct trace_array *tr = iter->tr;
cb0f12aa
IM
3582 struct trace_seq *s = &iter->seq;
3583 struct trace_entry *entry;
f633cef0 3584 struct trace_event *event;
cb0f12aa
IM
3585
3586 entry = iter->ent;
dd0e545f 3587
983f938a 3588 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3589 SEQ_PUT_FIELD(s, entry->pid);
3590 SEQ_PUT_FIELD(s, iter->cpu);
3591 SEQ_PUT_FIELD(s, iter->ts);
3592 if (trace_seq_has_overflowed(s))
3593 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3594 }
cb0f12aa 3595
f633cef0 3596 event = ftrace_find_event(entry->type);
a9a57763
SR
3597 return event ? event->funcs->binary(iter, 0, event) :
3598 TRACE_TYPE_HANDLED;
cb0f12aa
IM
3599}
3600
62b915f1 3601int trace_empty(struct trace_iterator *iter)
bc0c38d1 3602{
6d158a81 3603 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
3604 int cpu;
3605
9aba60fe 3606 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 3607 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 3608 cpu = iter->cpu_file;
6d158a81
SR
3609 buf_iter = trace_buffer_iter(iter, cpu);
3610 if (buf_iter) {
3611 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
3612 return 0;
3613 } else {
12883efb 3614 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
3615 return 0;
3616 }
3617 return 1;
3618 }
3619
ab46428c 3620 for_each_tracing_cpu(cpu) {
6d158a81
SR
3621 buf_iter = trace_buffer_iter(iter, cpu);
3622 if (buf_iter) {
3623 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
3624 return 0;
3625 } else {
12883efb 3626 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
3627 return 0;
3628 }
bc0c38d1 3629 }
d769041f 3630
797d3712 3631 return 1;
bc0c38d1
SR
3632}
3633
4f535968 3634/* Called with trace_event_read_lock() held. */
955b61e5 3635enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 3636{
983f938a
SRRH
3637 struct trace_array *tr = iter->tr;
3638 unsigned long trace_flags = tr->trace_flags;
2c4f035f
FW
3639 enum print_line_t ret;
3640
19a7fe20
SRRH
3641 if (iter->lost_events) {
3642 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3643 iter->cpu, iter->lost_events);
3644 if (trace_seq_has_overflowed(&iter->seq))
3645 return TRACE_TYPE_PARTIAL_LINE;
3646 }
bc21b478 3647
2c4f035f
FW
3648 if (iter->trace && iter->trace->print_line) {
3649 ret = iter->trace->print_line(iter);
3650 if (ret != TRACE_TYPE_UNHANDLED)
3651 return ret;
3652 }
72829bc3 3653
09ae7234
SRRH
3654 if (iter->ent->type == TRACE_BPUTS &&
3655 trace_flags & TRACE_ITER_PRINTK &&
3656 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3657 return trace_print_bputs_msg_only(iter);
3658
48ead020
FW
3659 if (iter->ent->type == TRACE_BPRINT &&
3660 trace_flags & TRACE_ITER_PRINTK &&
3661 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3662 return trace_print_bprintk_msg_only(iter);
48ead020 3663
66896a85
FW
3664 if (iter->ent->type == TRACE_PRINT &&
3665 trace_flags & TRACE_ITER_PRINTK &&
3666 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3667 return trace_print_printk_msg_only(iter);
66896a85 3668
cb0f12aa
IM
3669 if (trace_flags & TRACE_ITER_BIN)
3670 return print_bin_fmt(iter);
3671
5e3ca0ec
IM
3672 if (trace_flags & TRACE_ITER_HEX)
3673 return print_hex_fmt(iter);
3674
f9896bf3
IM
3675 if (trace_flags & TRACE_ITER_RAW)
3676 return print_raw_fmt(iter);
3677
f9896bf3
IM
3678 return print_trace_fmt(iter);
3679}
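/*
 * Summary of the dispatch order implemented above, since it is easy to
 * lose in the flag checks: lost-event annotations are always printed
 * first; a tracer's own ->print_line() gets the next say; the
 * printk-msg-only shortcuts (TRACE_BPUTS, TRACE_BPRINT, TRACE_PRINT)
 * come next; then the bin, hex and raw output flags, in that order;
 * and print_trace_fmt() is the fallback.
 */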
3680
7e9a49ef
JO
3681void trace_latency_header(struct seq_file *m)
3682{
3683 struct trace_iterator *iter = m->private;
983f938a 3684 struct trace_array *tr = iter->tr;
7e9a49ef
JO
3685
3686 /* print nothing if the buffers are empty */
3687 if (trace_empty(iter))
3688 return;
3689
3690 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3691 print_trace_header(m, iter);
3692
983f938a 3693 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
7e9a49ef
JO
3694 print_lat_help_header(m);
3695}
3696
62b915f1
JO
3697void trace_default_header(struct seq_file *m)
3698{
3699 struct trace_iterator *iter = m->private;
983f938a
SRRH
3700 struct trace_array *tr = iter->tr;
3701 unsigned long trace_flags = tr->trace_flags;
62b915f1 3702
f56e7f8e
JO
3703 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3704 return;
3705
62b915f1
JO
3706 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3707 /* print nothing if the buffers are empty */
3708 if (trace_empty(iter))
3709 return;
3710 print_trace_header(m, iter);
3711 if (!(trace_flags & TRACE_ITER_VERBOSE))
3712 print_lat_help_header(m);
3713 } else {
77271ce4
SR
3714 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3715 if (trace_flags & TRACE_ITER_IRQ_INFO)
441dae8f
JF
3716 print_func_help_header_irq(iter->trace_buffer,
3717 m, trace_flags);
77271ce4 3718 else
441dae8f
JF
3719 print_func_help_header(iter->trace_buffer, m,
3720 trace_flags);
77271ce4 3721 }
62b915f1
JO
3722 }
3723}
3724
e0a413f6
SR
3725static void test_ftrace_alive(struct seq_file *m)
3726{
3727 if (!ftrace_is_dead())
3728 return;
d79ac28f
RV
3729 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3730 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
3731}
3732
d8741e2e 3733#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 3734static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 3735{
d79ac28f
RV
3736 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3737 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3738 "# Takes a snapshot of the main buffer.\n"
3739 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3740 "# (Doesn't have to be '2' works with any number that\n"
3741 "# is not a '0' or '1')\n");
d8741e2e 3742}
f1affcaa
SRRH
3743
3744static void show_snapshot_percpu_help(struct seq_file *m)
3745{
fa6f0cc7 3746 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 3747#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
3748 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3749 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 3750#else
d79ac28f
RV
3751 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3752 "# Must use main snapshot file to allocate.\n");
f1affcaa 3753#endif
d79ac28f
RV
3754 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3755 "# (Doesn't have to be '2' works with any number that\n"
3756 "# is not a '0' or '1')\n");
f1affcaa
SRRH
3757}
3758
d8741e2e
SRRH
3759static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3760{
45ad21ca 3761 if (iter->tr->allocated_snapshot)
fa6f0cc7 3762 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 3763 else
fa6f0cc7 3764 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 3765
fa6f0cc7 3766 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
3767 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3768 show_snapshot_main_help(m);
3769 else
3770 show_snapshot_percpu_help(m);
d8741e2e
SRRH
3771}
3772#else
3773/* Should never be called */
3774static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3775#endif
3776
bc0c38d1
SR
3777static int s_show(struct seq_file *m, void *v)
3778{
3779 struct trace_iterator *iter = v;
a63ce5b3 3780 int ret;
bc0c38d1
SR
3781
3782 if (iter->ent == NULL) {
3783 if (iter->tr) {
3784 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3785 seq_puts(m, "#\n");
e0a413f6 3786 test_ftrace_alive(m);
bc0c38d1 3787 }
d8741e2e
SRRH
3788 if (iter->snapshot && trace_empty(iter))
3789 print_snapshot_help(m, iter);
3790 else if (iter->trace && iter->trace->print_header)
8bba1bf5 3791 iter->trace->print_header(m);
62b915f1
JO
3792 else
3793 trace_default_header(m);
3794
a63ce5b3
SR
3795 } else if (iter->leftover) {
3796 /*
3797 * If we filled the seq_file buffer earlier, we
3798 * want to just show it now.
3799 */
3800 ret = trace_print_seq(m, &iter->seq);
3801
3802 /* ret should this time be zero, but you never know */
3803 iter->leftover = ret;
3804
bc0c38d1 3805 } else {
f9896bf3 3806 print_trace_line(iter);
a63ce5b3
SR
3807 ret = trace_print_seq(m, &iter->seq);
3808 /*
3809 * If we overflow the seq_file buffer, then it will
3810 * ask us for this data again at start up.
3811 * Use that instead.
3812 * ret is 0 if seq_file write succeeded.
3813 * -1 otherwise.
3814 */
3815 iter->leftover = ret;
bc0c38d1
SR
3816 }
3817
3818 return 0;
3819}
3820
649e9c70
ON
3821/*
3822 * Should be used after trace_array_get(); trace_types_lock
3823 * ensures that i_cdev was already initialized.
3824 */
3825static inline int tracing_get_cpu(struct inode *inode)
3826{
3827 if (inode->i_cdev) /* See trace_create_cpu_file() */
3828 return (long)inode->i_cdev - 1;
3829 return RING_BUFFER_ALL_CPUS;
3830}
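/*
 * Illustrative sketch of the encoding relied on above (the creation
 * side lives in trace_create_cpu_file(), outside this excerpt): the
 * CPU number is stored in i_cdev biased by one, so that a NULL i_cdev
 * can mean "all CPUs":
 *
 *	inode->i_cdev = (void *)(long)(cpu + 1);	// at file creation
 *	cpu = (long)inode->i_cdev - 1;			// tracing_get_cpu()
 */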
3831
88e9d34c 3832static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3833 .start = s_start,
3834 .next = s_next,
3835 .stop = s_stop,
3836 .show = s_show,
bc0c38d1
SR
3837};
3838
e309b41d 3839static struct trace_iterator *
6484c71c 3840__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3841{
6484c71c 3842 struct trace_array *tr = inode->i_private;
bc0c38d1 3843 struct trace_iterator *iter;
50e18b94 3844 int cpu;
bc0c38d1 3845
85a2f9b4
SR
3846 if (tracing_disabled)
3847 return ERR_PTR(-ENODEV);
60a11774 3848
50e18b94 3849 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3850 if (!iter)
3851 return ERR_PTR(-ENOMEM);
bc0c38d1 3852
72917235 3853 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 3854 GFP_KERNEL);
93574fcc
DC
3855 if (!iter->buffer_iter)
3856 goto release;
3857
d7350c3f
FW
3858 /*
3859 * We make a copy of the current tracer to avoid concurrent
3860 * changes on it while we are reading.
3861 */
bc0c38d1 3862 mutex_lock(&trace_types_lock);
d7350c3f 3863 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3864 if (!iter->trace)
d7350c3f 3865 goto fail;
85a2f9b4 3866
2b6080f2 3867 *iter->trace = *tr->current_trace;
d7350c3f 3868
79f55997 3869 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3870 goto fail;
3871
12883efb
SRRH
3872 iter->tr = tr;
3873
3874#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3875 /* Currently only the top directory has a snapshot */
3876 if (tr->current_trace->print_max || snapshot)
12883efb 3877 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3878 else
12883efb
SRRH
3879#endif
3880 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3881 iter->snapshot = snapshot;
bc0c38d1 3882 iter->pos = -1;
6484c71c 3883 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3884 mutex_init(&iter->mutex);
bc0c38d1 3885
8bba1bf5
MM
3886 /* Notify the tracer early, before we stop tracing. */
3887 if (iter->trace && iter->trace->open)
a93751ca 3888 iter->trace->open(iter);
8bba1bf5 3889
12ef7d44 3890 /* Annotate start of buffers if we had overruns */
12883efb 3891 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3892 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3893
8be0709f 3894 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3895 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3896 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3897
debdd57f
HT
3898 /* stop the trace while dumping if we are not opening "snapshot" */
3899 if (!iter->snapshot)
2b6080f2 3900 tracing_stop_tr(tr);
2f26ebd5 3901
ae3b5093 3902 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3903 for_each_tracing_cpu(cpu) {
b04cc6b1 3904 iter->buffer_iter[cpu] =
12883efb 3905 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3906 }
3907 ring_buffer_read_prepare_sync();
3908 for_each_tracing_cpu(cpu) {
3909 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3910 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3911 }
3912 } else {
3913 cpu = iter->cpu_file;
3928a8a2 3914 iter->buffer_iter[cpu] =
12883efb 3915 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3916 ring_buffer_read_prepare_sync();
3917 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3918 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3919 }
3920
bc0c38d1
SR
3921 mutex_unlock(&trace_types_lock);
3922
bc0c38d1 3923 return iter;
3928a8a2 3924
d7350c3f 3925 fail:
3928a8a2 3926 mutex_unlock(&trace_types_lock);
d7350c3f 3927 kfree(iter->trace);
6d158a81 3928 kfree(iter->buffer_iter);
93574fcc 3929release:
50e18b94
JO
3930 seq_release_private(inode, file);
3931 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3932}
3933
3934int tracing_open_generic(struct inode *inode, struct file *filp)
3935{
60a11774
SR
3936 if (tracing_disabled)
3937 return -ENODEV;
3938
bc0c38d1
SR
3939 filp->private_data = inode->i_private;
3940 return 0;
3941}
3942
2e86421d
GB
3943bool tracing_is_disabled(void)
3944{
3945 return tracing_disabled;
3946}
3947
7b85af63
SRRH
3948/*
3949 * Open and update trace_array ref count.
3950 * Must have the current trace_array passed to it.
3951 */
dcc30223 3952static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3953{
3954 struct trace_array *tr = inode->i_private;
3955
3956 if (tracing_disabled)
3957 return -ENODEV;
3958
3959 if (trace_array_get(tr) < 0)
3960 return -ENODEV;
3961
3962 filp->private_data = inode->i_private;
3963
3964 return 0;
7b85af63
SRRH
3965}
3966
4fd27358 3967static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3968{
6484c71c 3969 struct trace_array *tr = inode->i_private;
907f2784 3970 struct seq_file *m = file->private_data;
4acd4d00 3971 struct trace_iterator *iter;
3928a8a2 3972 int cpu;
bc0c38d1 3973
ff451961 3974 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3975 trace_array_put(tr);
4acd4d00 3976 return 0;
ff451961 3977 }
4acd4d00 3978
6484c71c 3979 /* Writes do not use seq_file */
4acd4d00 3980 iter = m->private;
bc0c38d1 3981 mutex_lock(&trace_types_lock);
a695cb58 3982
3928a8a2
SR
3983 for_each_tracing_cpu(cpu) {
3984 if (iter->buffer_iter[cpu])
3985 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3986 }
3987
bc0c38d1
SR
3988 if (iter->trace && iter->trace->close)
3989 iter->trace->close(iter);
3990
debdd57f
HT
3991 if (!iter->snapshot)
3992 /* reenable tracing if it was previously enabled */
2b6080f2 3993 tracing_start_tr(tr);
f77d09a3
AL
3994
3995 __trace_array_put(tr);
3996
bc0c38d1
SR
3997 mutex_unlock(&trace_types_lock);
3998
d7350c3f 3999 mutex_destroy(&iter->mutex);
b0dfa978 4000 free_cpumask_var(iter->started);
d7350c3f 4001 kfree(iter->trace);
6d158a81 4002 kfree(iter->buffer_iter);
50e18b94 4003 seq_release_private(inode, file);
ff451961 4004
bc0c38d1
SR
4005 return 0;
4006}
4007
7b85af63
SRRH
4008static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4009{
4010 struct trace_array *tr = inode->i_private;
4011
4012 trace_array_put(tr);
bc0c38d1
SR
4013 return 0;
4014}
4015
7b85af63
SRRH
4016static int tracing_single_release_tr(struct inode *inode, struct file *file)
4017{
4018 struct trace_array *tr = inode->i_private;
4019
4020 trace_array_put(tr);
4021
4022 return single_release(inode, file);
4023}
4024
bc0c38d1
SR
4025static int tracing_open(struct inode *inode, struct file *file)
4026{
6484c71c 4027 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
4028 struct trace_iterator *iter;
4029 int ret = 0;
bc0c38d1 4030
ff451961
SRRH
4031 if (trace_array_get(tr) < 0)
4032 return -ENODEV;
4033
4acd4d00 4034 /* If this file was open for write, then erase contents */
6484c71c
ON
4035 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4036 int cpu = tracing_get_cpu(inode);
8dd33bcb
BY
4037 struct trace_buffer *trace_buf = &tr->trace_buffer;
4038
4039#ifdef CONFIG_TRACER_MAX_TRACE
4040 if (tr->current_trace->print_max)
4041 trace_buf = &tr->max_buffer;
4042#endif
6484c71c
ON
4043
4044 if (cpu == RING_BUFFER_ALL_CPUS)
8dd33bcb 4045 tracing_reset_online_cpus(trace_buf);
4acd4d00 4046 else
8dd33bcb 4047 tracing_reset(trace_buf, cpu);
4acd4d00 4048 }
bc0c38d1 4049
4acd4d00 4050 if (file->f_mode & FMODE_READ) {
6484c71c 4051 iter = __tracing_open(inode, file, false);
4acd4d00
SR
4052 if (IS_ERR(iter))
4053 ret = PTR_ERR(iter);
983f938a 4054 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4acd4d00
SR
4055 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4056 }
ff451961
SRRH
4057
4058 if (ret < 0)
4059 trace_array_put(tr);
4060
bc0c38d1
SR
4061 return ret;
4062}
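/*
 * Usage sketch (illustrative, not part of the kernel source; assumes
 * tracefs is mounted at /sys/kernel/tracing): the FMODE_WRITE/O_TRUNC
 * test above is what makes "echo > trace" clear the buffer. The same
 * reset from a minimal userspace C program:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/trace",
 *			      O_WRONLY | O_TRUNC);
 *		if (fd < 0)
 *			return 1;
 *		close(fd);	// the open itself reset the buffer
 *		return 0;
 *	}
 */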
4063
607e2ea1
SRRH
4064/*
4065 * Some tracers are not suitable for instance buffers.
4066 * A tracer is always available for the global array (toplevel)
4067 * or if it explicitly states that it is.
4068 */
4069static bool
4070trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4071{
4072 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4073}
4074
4075/* Find the next tracer that this trace array may use */
4076static struct tracer *
4077get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4078{
4079 while (t && !trace_ok_for_array(t, tr))
4080 t = t->next;
4081
4082 return t;
4083}
4084
e309b41d 4085static void *
bc0c38d1
SR
4086t_next(struct seq_file *m, void *v, loff_t *pos)
4087{
607e2ea1 4088 struct trace_array *tr = m->private;
f129e965 4089 struct tracer *t = v;
bc0c38d1
SR
4090
4091 (*pos)++;
4092
4093 if (t)
607e2ea1 4094 t = get_tracer_for_array(tr, t->next);
bc0c38d1 4095
bc0c38d1
SR
4096 return t;
4097}
4098
4099static void *t_start(struct seq_file *m, loff_t *pos)
4100{
607e2ea1 4101 struct trace_array *tr = m->private;
f129e965 4102 struct tracer *t;
bc0c38d1
SR
4103 loff_t l = 0;
4104
4105 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
4106
4107 t = get_tracer_for_array(tr, trace_types);
4108 for (; t && l < *pos; t = t_next(m, t, &l))
4109 ;
bc0c38d1
SR
4110
4111 return t;
4112}
4113
4114static void t_stop(struct seq_file *m, void *p)
4115{
4116 mutex_unlock(&trace_types_lock);
4117}
4118
4119static int t_show(struct seq_file *m, void *v)
4120{
4121 struct tracer *t = v;
4122
4123 if (!t)
4124 return 0;
4125
fa6f0cc7 4126 seq_puts(m, t->name);
bc0c38d1
SR
4127 if (t->next)
4128 seq_putc(m, ' ');
4129 else
4130 seq_putc(m, '\n');
4131
4132 return 0;
4133}
4134
88e9d34c 4135static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
4136 .start = t_start,
4137 .next = t_next,
4138 .stop = t_stop,
4139 .show = t_show,
bc0c38d1
SR
4140};
4141
4142static int show_traces_open(struct inode *inode, struct file *file)
4143{
607e2ea1
SRRH
4144 struct trace_array *tr = inode->i_private;
4145 struct seq_file *m;
4146 int ret;
4147
60a11774
SR
4148 if (tracing_disabled)
4149 return -ENODEV;
4150
607e2ea1
SRRH
4151 ret = seq_open(file, &show_traces_seq_ops);
4152 if (ret)
4153 return ret;
4154
4155 m = file->private_data;
4156 m->private = tr;
4157
4158 return 0;
bc0c38d1
SR
4159}
4160
4acd4d00
SR
4161static ssize_t
4162tracing_write_stub(struct file *filp, const char __user *ubuf,
4163 size_t count, loff_t *ppos)
4164{
4165 return count;
4166}
4167
098c879e 4168loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 4169{
098c879e
SRRH
4170 int ret;
4171
364829b1 4172 if (file->f_mode & FMODE_READ)
098c879e 4173 ret = seq_lseek(file, offset, whence);
364829b1 4174 else
098c879e
SRRH
4175 file->f_pos = ret = 0;
4176
4177 return ret;
364829b1
SP
4178}
4179
5e2336a0 4180static const struct file_operations tracing_fops = {
4bf39a94
IM
4181 .open = tracing_open,
4182 .read = seq_read,
4acd4d00 4183 .write = tracing_write_stub,
098c879e 4184 .llseek = tracing_lseek,
4bf39a94 4185 .release = tracing_release,
bc0c38d1
SR
4186};
4187
5e2336a0 4188static const struct file_operations show_traces_fops = {
c7078de1
IM
4189 .open = show_traces_open,
4190 .read = seq_read,
4191 .release = seq_release,
b444786f 4192 .llseek = seq_lseek,
c7078de1
IM
4193};
4194
4195static ssize_t
4196tracing_cpumask_read(struct file *filp, char __user *ubuf,
4197 size_t count, loff_t *ppos)
4198{
ccfe9e42 4199 struct trace_array *tr = file_inode(filp)->i_private;
90e406f9 4200 char *mask_str;
36dfe925 4201 int len;
c7078de1 4202
90e406f9
CD
4203 len = snprintf(NULL, 0, "%*pb\n",
4204 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4205 mask_str = kmalloc(len, GFP_KERNEL);
4206 if (!mask_str)
4207 return -ENOMEM;
36dfe925 4208
90e406f9 4209 len = snprintf(mask_str, len, "%*pb\n",
1a40243b
TH
4210 cpumask_pr_args(tr->tracing_cpumask));
4211 if (len >= count) {
36dfe925
IM
4212 count = -EINVAL;
4213 goto out_err;
4214 }
90e406f9 4215 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
36dfe925
IM
4216
4217out_err:
90e406f9 4218 kfree(mask_str);
c7078de1
IM
4219
4220 return count;
4221}
4222
4223static ssize_t
4224tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4225 size_t count, loff_t *ppos)
4226{
ccfe9e42 4227 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 4228 cpumask_var_t tracing_cpumask_new;
2b6080f2 4229 int err, cpu;
9e01c1b7
RR
4230
4231 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4232 return -ENOMEM;
c7078de1 4233
9e01c1b7 4234 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 4235 if (err)
36dfe925
IM
4236 goto err_unlock;
4237
a5e25883 4238 local_irq_disable();
0b9b12c1 4239 arch_spin_lock(&tr->max_lock);
ab46428c 4240 for_each_tracing_cpu(cpu) {
36dfe925
IM
4241 /*
4242 * Increase/decrease the disabled counter if we are
4243 * about to flip a bit in the cpumask:
4244 */
ccfe9e42 4245 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4246 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4247 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4248 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 4249 }
ccfe9e42 4250 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4251 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4252 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4253 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
4254 }
4255 }
0b9b12c1 4256 arch_spin_unlock(&tr->max_lock);
a5e25883 4257 local_irq_enable();
36dfe925 4258
ccfe9e42 4259 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
9e01c1b7 4260 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
4261
4262 return count;
36dfe925
IM
4263
4264err_unlock:
215368e8 4265 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
4266
4267 return err;
c7078de1
IM
4268}
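/*
 * Usage sketch (illustrative; assumes tracefs at /sys/kernel/tracing):
 * cpumask_parse_user() above takes the usual hex cpumask format, so
 * "echo 3 > tracing_cpumask" limits tracing to CPUs 0 and 1. The same
 * write from C:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/kernel/tracing/tracing_cpumask", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "3", 1);	// mask 0x3 = CPUs 0 and 1
 *		close(fd);
 *	}
 */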
4269
5e2336a0 4270static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 4271 .open = tracing_open_generic_tr,
c7078de1
IM
4272 .read = tracing_cpumask_read,
4273 .write = tracing_cpumask_write,
ccfe9e42 4274 .release = tracing_release_generic_tr,
b444786f 4275 .llseek = generic_file_llseek,
bc0c38d1
SR
4276};
4277
fdb372ed 4278static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 4279{
d8e83d26 4280 struct tracer_opt *trace_opts;
2b6080f2 4281 struct trace_array *tr = m->private;
d8e83d26 4282 u32 tracer_flags;
d8e83d26 4283 int i;
adf9f195 4284
d8e83d26 4285 mutex_lock(&trace_types_lock);
2b6080f2
SR
4286 tracer_flags = tr->current_trace->flags->val;
4287 trace_opts = tr->current_trace->flags->opts;
d8e83d26 4288
bc0c38d1 4289 for (i = 0; trace_options[i]; i++) {
983f938a 4290 if (tr->trace_flags & (1 << i))
fdb372ed 4291 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 4292 else
fdb372ed 4293 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
4294 }
4295
adf9f195
FW
4296 for (i = 0; trace_opts[i].name; i++) {
4297 if (tracer_flags & trace_opts[i].bit)
fdb372ed 4298 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 4299 else
fdb372ed 4300 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 4301 }
d8e83d26 4302 mutex_unlock(&trace_types_lock);
adf9f195 4303
fdb372ed 4304 return 0;
bc0c38d1 4305}
bc0c38d1 4306
8c1a49ae 4307static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
4308 struct tracer_flags *tracer_flags,
4309 struct tracer_opt *opts, int neg)
4310{
d39cdd20 4311 struct tracer *trace = tracer_flags->trace;
8d18eaaf 4312 int ret;
bc0c38d1 4313
8c1a49ae 4314 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
4315 if (ret)
4316 return ret;
4317
4318 if (neg)
4319 tracer_flags->val &= ~opts->bit;
4320 else
4321 tracer_flags->val |= opts->bit;
4322 return 0;
bc0c38d1
SR
4323}
4324
adf9f195 4325/* Try to assign a tracer specific option */
8c1a49ae 4326static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 4327{
8c1a49ae 4328 struct tracer *trace = tr->current_trace;
7770841e 4329 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 4330 struct tracer_opt *opts = NULL;
8d18eaaf 4331 int i;
adf9f195 4332
7770841e
Z
4333 for (i = 0; tracer_flags->opts[i].name; i++) {
4334 opts = &tracer_flags->opts[i];
adf9f195 4335
8d18eaaf 4336 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 4337 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 4338 }
adf9f195 4339
8d18eaaf 4340 return -EINVAL;
adf9f195
FW
4341}
4342
613f04a0
SRRH
4343/* Some tracers require overwrite to stay enabled */
4344int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4345{
4346 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4347 return -1;
4348
4349 return 0;
4350}
4351
2b6080f2 4352int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
4353{
4354 /* do nothing if flag is already set */
983f938a 4355 if (!!(tr->trace_flags & mask) == !!enabled)
613f04a0
SRRH
4356 return 0;
4357
4358 /* Give the tracer a chance to approve the change */
2b6080f2 4359 if (tr->current_trace->flag_changed)
bf6065b5 4360 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 4361 return -EINVAL;
af4617bd
SR
4362
4363 if (enabled)
983f938a 4364 tr->trace_flags |= mask;
af4617bd 4365 else
983f938a 4366 tr->trace_flags &= ~mask;
e870e9a1
LZ
4367
4368 if (mask == TRACE_ITER_RECORD_CMD)
4369 trace_event_enable_cmd_record(enabled);
750912fa 4370
d914ba37
JF
4371 if (mask == TRACE_ITER_RECORD_TGID) {
4372 if (!tgid_map)
6396bb22
KC
4373 tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4374 sizeof(*tgid_map),
d914ba37
JF
4375 GFP_KERNEL);
4376 if (!tgid_map) {
4377 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4378 return -ENOMEM;
4379 }
4380
4381 trace_event_enable_tgid_record(enabled);
4382 }
4383
c37775d5
SR
4384 if (mask == TRACE_ITER_EVENT_FORK)
4385 trace_event_follow_fork(tr, enabled);
4386
1e10486f
NK
4387 if (mask == TRACE_ITER_FUNC_FORK)
4388 ftrace_pid_follow_fork(tr, enabled);
4389
80902822 4390 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 4391 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 4392#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 4393 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
4394#endif
4395 }
81698831 4396
b9f9108c 4397 if (mask == TRACE_ITER_PRINTK) {
81698831 4398 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
4399 trace_printk_control(enabled);
4400 }
613f04a0
SRRH
4401
4402 return 0;
af4617bd
SR
4403}
4404
2b6080f2 4405static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 4406{
8d18eaaf 4407 char *cmp;
bc0c38d1 4408 int neg = 0;
591a033d 4409 int ret;
a4d1e688 4410 size_t orig_len = strlen(option);
bc0c38d1 4411
7bcfaf54 4412 cmp = strstrip(option);
bc0c38d1 4413
b6b27355 4414 if (str_has_prefix(cmp, "no")) {
bc0c38d1
SR
4415 neg = 1;
4416 cmp += 2;
4417 }
4418
69d34da2
SRRH
4419 mutex_lock(&trace_types_lock);
4420
591a033d 4421 ret = match_string(trace_options, -1, cmp);
adf9f195 4422 /* If no option could be set, test the specific tracer options */
591a033d 4423 if (ret < 0)
8c1a49ae 4424 ret = set_tracer_option(tr, cmp, neg);
591a033d
YX
4425 else
4426 ret = set_tracer_flag(tr, 1 << ret, !neg);
69d34da2
SRRH
4427
4428 mutex_unlock(&trace_types_lock);
bc0c38d1 4429
a4d1e688
JW
4430 /*
4431 * If the first trailing whitespace is replaced with '\0' by strstrip,
4432 * turn it back into a space.
4433 */
4434 if (orig_len > strlen(option))
4435 option[strlen(option)] = ' ';
4436
7bcfaf54
SR
4437 return ret;
4438}
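/*
 * For clarity, the option strings accepted above come in three forms
 * ("overwrite" is one of the generic trace_options; "funcgraph-proc"
 * is a tracer-specific flag of the function_graph tracer, used here
 * only as an example):
 *
 *	"overwrite"	 - set a generic flag
 *	"nooverwrite"	 - clear the same flag (the "no" prefix is stripped)
 *	"funcgraph-proc" - not in trace_options, so it is handed to the
 *			   current tracer's own flag table via
 *			   set_tracer_option()
 */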
4439
a4d1e688
JW
4440static void __init apply_trace_boot_options(void)
4441{
4442 char *buf = trace_boot_options_buf;
4443 char *option;
4444
4445 while (true) {
4446 option = strsep(&buf, ",");
4447
4448 if (!option)
4449 break;
a4d1e688 4450
43ed3843
SRRH
4451 if (*option)
4452 trace_set_options(&global_trace, option);
a4d1e688
JW
4453
4454 /* Put back the comma to allow this to be called again */
4455 if (buf)
4456 *(buf - 1) = ',';
4457 }
4458}
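/*
 * The buffer walked above is filled from the "trace_options=" kernel
 * boot parameter; a sketch of the expected format (comma separated,
 * same tokens as the trace_options file):
 *
 *	trace_options=sym-offset,nooverwrite
 *
 * strsep() cuts the buffer at each ',', which is why the loop puts the
 * comma back so that the buffer can be parsed again on a later call.
 */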
4459
7bcfaf54
SR
4460static ssize_t
4461tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4462 size_t cnt, loff_t *ppos)
4463{
2b6080f2
SR
4464 struct seq_file *m = filp->private_data;
4465 struct trace_array *tr = m->private;
7bcfaf54 4466 char buf[64];
613f04a0 4467 int ret;
7bcfaf54
SR
4468
4469 if (cnt >= sizeof(buf))
4470 return -EINVAL;
4471
4afe6495 4472 if (copy_from_user(buf, ubuf, cnt))
7bcfaf54
SR
4473 return -EFAULT;
4474
a8dd2176
SR
4475 buf[cnt] = 0;
4476
2b6080f2 4477 ret = trace_set_options(tr, buf);
613f04a0
SRRH
4478 if (ret < 0)
4479 return ret;
7bcfaf54 4480
cf8517cf 4481 *ppos += cnt;
bc0c38d1
SR
4482
4483 return cnt;
4484}
4485
fdb372ed
LZ
4486static int tracing_trace_options_open(struct inode *inode, struct file *file)
4487{
7b85af63 4488 struct trace_array *tr = inode->i_private;
f77d09a3 4489 int ret;
7b85af63 4490
fdb372ed
LZ
4491 if (tracing_disabled)
4492 return -ENODEV;
2b6080f2 4493
7b85af63
SRRH
4494 if (trace_array_get(tr) < 0)
4495 return -ENODEV;
4496
f77d09a3
AL
4497 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4498 if (ret < 0)
4499 trace_array_put(tr);
4500
4501 return ret;
fdb372ed
LZ
4502}
4503
5e2336a0 4504static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
4505 .open = tracing_trace_options_open,
4506 .read = seq_read,
4507 .llseek = seq_lseek,
7b85af63 4508 .release = tracing_single_release_tr,
ee6bce52 4509 .write = tracing_trace_options_write,
bc0c38d1
SR
4510};
4511
7bd2f24c
IM
4512static const char readme_msg[] =
4513 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
4514 "# echo 0 > tracing_on : quick way to disable tracing\n"
4515 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4516 " Important files:\n"
4517 " trace\t\t\t- The static contents of the buffer\n"
4518 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4519 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4520 " current_tracer\t- function and latency tracers\n"
4521 " available_tracers\t- list of configured tracers for current_tracer\n"
4522 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4523 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4524 " trace_clock\t\t-change the clock used to order events\n"
4525 " local: Per cpu clock but may not be synced across CPUs\n"
4526 " global: Synced across CPUs but slows tracing down.\n"
4527 " counter: Not a clock, but just an increment\n"
4528 " uptime: Jiffy counter from time of boot\n"
4529 " perf: Same clock that perf events use\n"
4530#ifdef CONFIG_X86_64
4531 " x86-tsc: TSC cycle counter\n"
4532#endif
2c1ea60b
TZ
4533 "\n timestamp_mode\t-view the mode used to timestamp events\n"
4534 " delta: Delta difference against a buffer-wide timestamp\n"
4535 " absolute: Absolute (standalone) timestamp\n"
22f45649 4536 "\n trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
fa32e855 4537 "\n trace_marker_raw\t\t- Writes into this file are inserted as binary data into the kernel buffer\n"
22f45649
SRRH
4538 " tracing_cpumask\t- Limit which CPUs to trace\n"
4539 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4540 "\t\t\t Remove sub-buffer with rmdir\n"
4541 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
4542 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4543 "\t\t\t option name\n"
939c7a4f 4544 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
4545#ifdef CONFIG_DYNAMIC_FTRACE
4546 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
4547 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4548 "\t\t\t functions\n"
60f1d5e3 4549 "\t accepts: func_full_name or glob-matching-pattern\n"
71485c45
SRRH
4550 "\t modules: Can select a group via module\n"
4551 "\t Format: :mod:<module-name>\n"
4552 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4553 "\t triggers: a command to perform when function is hit\n"
4554 "\t Format: <function>:<trigger>[:count]\n"
4555 "\t trigger: traceon, traceoff\n"
4556 "\t\t enable_event:<system>:<event>\n"
4557 "\t\t disable_event:<system>:<event>\n"
22f45649 4558#ifdef CONFIG_STACKTRACE
71485c45 4559 "\t\t stacktrace\n"
22f45649
SRRH
4560#endif
4561#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4562 "\t\t snapshot\n"
22f45649 4563#endif
17a280ea
SRRH
4564 "\t\t dump\n"
4565 "\t\t cpudump\n"
71485c45
SRRH
4566 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4567 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4568 "\t The first one will disable tracing every time do_fault is hit\n"
4569 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4570 "\t The first time do trap is hit and it disables tracing, the\n"
4571 "\t counter will decrement to 2. If tracing is already disabled,\n"
4572 "\t the counter will not decrement. It only decrements when the\n"
4573 "\t trigger did work\n"
4574 "\t To remove trigger without count:\n"
4575 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4576 "\t To remove trigger with a count:\n"
4577 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 4578 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
4579 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4580 "\t modules: Can select a group via module command :mod:\n"
4581 "\t Does not accept triggers\n"
22f45649
SRRH
4582#endif /* CONFIG_DYNAMIC_FTRACE */
4583#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
4584 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4585 "\t\t (function)\n"
22f45649
SRRH
4586#endif
4587#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4588 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 4589 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
4590 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4591#endif
4592#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
4593 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4594 "\t\t\t snapshot buffer. Read the contents for more\n"
4595 "\t\t\t information\n"
22f45649 4596#endif
991821c8 4597#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
4598 " stack_trace\t\t- Shows the max stack trace when active\n"
4599 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
4600 "\t\t\t Write into this file to reset the max size (trigger a\n"
4601 "\t\t\t new trace)\n"
22f45649 4602#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
4603 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4604 "\t\t\t traces\n"
22f45649 4605#endif
991821c8 4606#endif /* CONFIG_STACK_TRACER */
5448d44c
MH
4607#ifdef CONFIG_DYNAMIC_EVENTS
4608 " dynamic_events\t\t- Add/remove/show the generic dynamic events\n"
4609 "\t\t\t Write into this file to define/undefine new trace events.\n"
4610#endif
6b0b7551 4611#ifdef CONFIG_KPROBE_EVENTS
86425625
MH
4612 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4613 "\t\t\t Write into this file to define/undefine new trace events.\n"
4614#endif
6b0b7551 4615#ifdef CONFIG_UPROBE_EVENTS
86425625
MH
4616 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4617 "\t\t\t Write into this file to define/undefine new trace events.\n"
4618#endif
6b0b7551 4619#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
86425625 4620 "\t accepts: event-definitions (one definition per line)\n"
c3ca46ef
MH
4621 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4622 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
7bbab38d
MH
4623#ifdef CONFIG_HIST_TRIGGERS
4624 "\t s:[synthetic/]<event> <field> [<field>]\n"
4625#endif
86425625 4626 "\t -:[<group>/]<event>\n"
6b0b7551 4627#ifdef CONFIG_KPROBE_EVENTS
86425625 4628 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
35b6f55a 4629 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
86425625 4630#endif
6b0b7551 4631#ifdef CONFIG_UPROBE_EVENTS
1cc33161 4632 "\t place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
86425625
MH
4633#endif
4634 "\t args: <name>=fetcharg[:type]\n"
4635 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
a1303af5
MH
4636#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
4637 "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n"
4638#else
86425625 4639 "\t $stack<index>, $stack, $retval, $comm\n"
a1303af5 4640#endif
60c2e0ce 4641 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
40b53b77
MH
4642 "\t b<bit-width>@<bit-offset>/<container-size>,\n"
4643 "\t <type>\\[<array-size>\\]\n"
7bbab38d
MH
4644#ifdef CONFIG_HIST_TRIGGERS
4645 "\t field: <stype> <name>;\n"
4646 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
4647 "\t [unsigned] char/int/long\n"
4648#endif
86425625 4649#endif
26f25564
TZ
4650 " events/\t\t- Directory containing all trace event subsystems:\n"
4651 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4652 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
4653 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4654 "\t\t\t events\n"
26f25564 4655 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
4656 " events/<system>/<event>/\t- Directory containing control files for\n"
4657 "\t\t\t <event>:\n"
26f25564
TZ
4658 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4659 " filter\t\t- If set, only events passing filter are traced\n"
4660 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
4661 "\t Format: <trigger>[:count][if <filter>]\n"
4662 "\t trigger: traceon, traceoff\n"
4663 "\t enable_event:<system>:<event>\n"
4664 "\t disable_event:<system>:<event>\n"
d0bad49b
TZ
4665#ifdef CONFIG_HIST_TRIGGERS
4666 "\t enable_hist:<system>:<event>\n"
4667 "\t disable_hist:<system>:<event>\n"
4668#endif
26f25564 4669#ifdef CONFIG_STACKTRACE
71485c45 4670 "\t\t stacktrace\n"
26f25564
TZ
4671#endif
4672#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4673 "\t\t snapshot\n"
7ef224d1
TZ
4674#endif
4675#ifdef CONFIG_HIST_TRIGGERS
4676 "\t\t hist (see below)\n"
26f25564 4677#endif
71485c45
SRRH
4678 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4679 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4680 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4681 "\t events/block/block_unplug/trigger\n"
4682 "\t The first disables tracing every time block_unplug is hit.\n"
4683 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4684 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4685 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4686 "\t Like function triggers, the counter is only decremented if it\n"
4687 "\t enabled or disabled tracing.\n"
4688 "\t To remove a trigger without a count:\n"
4689 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4690 "\t To remove a trigger with a count:\n"
4691 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4692 "\t Filters can be ignored when removing a trigger.\n"
7ef224d1
TZ
4693#ifdef CONFIG_HIST_TRIGGERS
4694 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
76a3b0c8 4695 "\t Format: hist:keys=<field1[,field2,...]>\n"
f2606835 4696 "\t [:values=<field1[,field2,...]>]\n"
e62347d2 4697 "\t [:sort=<field1[,field2,...]>]\n"
7ef224d1 4698 "\t [:size=#entries]\n"
e86ae9ba 4699 "\t [:pause][:continue][:clear]\n"
5463bfda 4700 "\t [:name=histname1]\n"
7ef224d1
TZ
4701 "\t [if <filter>]\n\n"
4702 "\t When a matching event is hit, an entry is added to a hash\n"
f2606835
TZ
4703 "\t table using the key(s) and value(s) named, and the value of a\n"
4704 "\t sum called 'hitcount' is incremented. Keys and values\n"
4705 "\t correspond to fields in the event's format description. Keys\n"
69a0200c
TZ
4706 "\t can be any field, or the special string 'stacktrace'.\n"
4707 "\t Compound keys consisting of up to two fields can be specified\n"
4708 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4709 "\t fields. Sort keys consisting of up to two fields can be\n"
4710 "\t specified using the 'sort' keyword. The sort direction can\n"
4711 "\t be modified by appending '.descending' or '.ascending' to a\n"
4712 "\t sort field. The 'size' parameter can be used to specify more\n"
5463bfda
TZ
4713 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4714 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4715 "\t its histogram data will be shared with other triggers of the\n"
4716 "\t same name, and trigger hits will update this common data.\n\n"
7ef224d1 4717 "\t Reading the 'hist' file for the event will dump the hash\n"
52a7f16d
TZ
4718 "\t table in its entirety to stdout. If there are multiple hist\n"
4719 "\t triggers attached to an event, there will be a table for each\n"
5463bfda
TZ
4720 "\t trigger in the output. The table displayed for a named\n"
4721 "\t trigger will be the same as any other instance having the\n"
4722 "\t same name. The default format used to display a given field\n"
4723 "\t can be modified by appending any of the following modifiers\n"
4724 "\t to the field name, as applicable:\n\n"
c6afad49
TZ
4725 "\t .hex display a number as a hex value\n"
4726 "\t .sym display an address as a symbol\n"
6b4827ad 4727 "\t .sym-offset display an address as a symbol and offset\n"
31696198 4728 "\t .execname display a common_pid as a program name\n"
860f9f6b
TZ
4729 "\t .syscall display a syscall id as a syscall name\n"
4730 "\t .log2 display log2 value rather than raw number\n"
4731 "\t .usecs display a common_timestamp in microseconds\n\n"
83e99914
TZ
4732 "\t The 'pause' parameter can be used to pause an existing hist\n"
4733 "\t trigger or to start a hist trigger but not log any events\n"
4734 "\t until told to do so. 'continue' can be used to start or\n"
4735 "\t restart a paused hist trigger.\n\n"
e86ae9ba
TZ
4736 "\t The 'clear' parameter will clear the contents of a running\n"
4737 "\t hist trigger and leave its current paused/active state\n"
4738 "\t unchanged.\n\n"
d0bad49b
TZ
4739 "\t The enable_hist and disable_hist triggers can be used to\n"
4740 "\t have one event conditionally start and stop another event's\n"
4741 "\t already-attached hist trigger. The syntax is analagous to\n"
4742 "\t the enable_event and disable_event triggers.\n"
7ef224d1 4743#endif
7bd2f24c
IM
4744;
4745
4746static ssize_t
4747tracing_readme_read(struct file *filp, char __user *ubuf,
4748 size_t cnt, loff_t *ppos)
4749{
4750 return simple_read_from_buffer(ubuf, cnt, ppos,
4751 readme_msg, strlen(readme_msg));
4752}
4753
5e2336a0 4754static const struct file_operations tracing_readme_fops = {
c7078de1
IM
4755 .open = tracing_open_generic,
4756 .read = tracing_readme_read,
b444786f 4757 .llseek = generic_file_llseek,
7bd2f24c
IM
4758};
4759
99c621d7
MS
4760static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4761{
4762 int *ptr = v;
4763
4764 if (*pos || m->count)
4765 ptr++;
4766
4767 (*pos)++;
4768
4769 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4770 if (trace_find_tgid(*ptr))
4771 return ptr;
4772 }
4773
4774 return NULL;
4775}
4776
4777static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4778{
4779 void *v;
4780 loff_t l = 0;
4781
4782 if (!tgid_map)
4783 return NULL;
4784
4785 v = &tgid_map[0];
4786 while (l <= *pos) {
4787 v = saved_tgids_next(m, v, &l);
4788 if (!v)
4789 return NULL;
4790 }
4791
4792 return v;
4793}
4794
4795static void saved_tgids_stop(struct seq_file *m, void *v)
4796{
4797}
4798
4799static int saved_tgids_show(struct seq_file *m, void *v)
4800{
4801 int pid = (int *)v - tgid_map;
4802
4803 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4804 return 0;
4805}
4806
4807static const struct seq_operations tracing_saved_tgids_seq_ops = {
4808 .start = saved_tgids_start,
4809 .stop = saved_tgids_stop,
4810 .next = saved_tgids_next,
4811 .show = saved_tgids_show,
4812};
4813
4814static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4815{
4816 if (tracing_disabled)
4817 return -ENODEV;
4818
4819 return seq_open(filp, &tracing_saved_tgids_seq_ops);
4820}
4821
4822
4823static const struct file_operations tracing_saved_tgids_fops = {
4824 .open = tracing_saved_tgids_open,
4825 .read = seq_read,
4826 .llseek = seq_lseek,
4827 .release = seq_release,
4828};
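/*
 * A sketch of how the seq_operations above are driven by seq_read()
 * (this is the standard seq_file contract, not specific to this file):
 *
 *	p = start(m, &pos);		// find the record at *pos
 *	while (p) {
 *		show(m, p);		// emit one "pid tgid" line
 *		p = next(m, p, &pos);	// advance the cursor
 *	}
 *	stop(m, p);			// clean up
 *
 * which is why saved_tgids_next() both advances the pointer and skips
 * pids that have no recorded tgid.
 */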
4829
42584c81
YY
4830static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4831{
4832 unsigned int *ptr = v;
69abe6a5 4833
42584c81
YY
4834 if (*pos || m->count)
4835 ptr++;
69abe6a5 4836
42584c81 4837 (*pos)++;
69abe6a5 4838
939c7a4f
YY
4839 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4840 ptr++) {
42584c81
YY
4841 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4842 continue;
69abe6a5 4843
42584c81
YY
4844 return ptr;
4845 }
69abe6a5 4846
42584c81
YY
4847 return NULL;
4848}
4849
4850static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4851{
4852 void *v;
4853 loff_t l = 0;
69abe6a5 4854
4c27e756
SRRH
4855 preempt_disable();
4856 arch_spin_lock(&trace_cmdline_lock);
4857
939c7a4f 4858 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
4859 while (l <= *pos) {
4860 v = saved_cmdlines_next(m, v, &l);
4861 if (!v)
4862 return NULL;
69abe6a5
AP
4863 }
4864
42584c81
YY
4865 return v;
4866}
4867
4868static void saved_cmdlines_stop(struct seq_file *m, void *v)
4869{
4c27e756
SRRH
4870 arch_spin_unlock(&trace_cmdline_lock);
4871 preempt_enable();
42584c81 4872}
69abe6a5 4873
42584c81
YY
4874static int saved_cmdlines_show(struct seq_file *m, void *v)
4875{
4876 char buf[TASK_COMM_LEN];
4877 unsigned int *pid = v;
69abe6a5 4878
4c27e756 4879 __trace_find_cmdline(*pid, buf);
42584c81
YY
4880 seq_printf(m, "%d %s\n", *pid, buf);
4881 return 0;
4882}
4883
4884static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4885 .start = saved_cmdlines_start,
4886 .next = saved_cmdlines_next,
4887 .stop = saved_cmdlines_stop,
4888 .show = saved_cmdlines_show,
4889};
4890
4891static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4892{
4893 if (tracing_disabled)
4894 return -ENODEV;
4895
4896 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
4897}
4898
4899static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
4900 .open = tracing_saved_cmdlines_open,
4901 .read = seq_read,
4902 .llseek = seq_lseek,
4903 .release = seq_release,
69abe6a5
AP
4904};
4905
939c7a4f
YY
4906static ssize_t
4907tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4908 size_t cnt, loff_t *ppos)
4909{
4910 char buf[64];
4911 int r;
4912
4913 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 4914 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
4915 arch_spin_unlock(&trace_cmdline_lock);
4916
4917 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4918}
4919
4920static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4921{
4922 kfree(s->saved_cmdlines);
4923 kfree(s->map_cmdline_to_pid);
4924 kfree(s);
4925}
4926
4927static int tracing_resize_saved_cmdlines(unsigned int val)
4928{
4929 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4930
a6af8fbf 4931 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
4932 if (!s)
4933 return -ENOMEM;
4934
4935 if (allocate_cmdlines_buffer(val, s) < 0) {
4936 kfree(s);
4937 return -ENOMEM;
4938 }
4939
4940 arch_spin_lock(&trace_cmdline_lock);
4941 savedcmd_temp = savedcmd;
4942 savedcmd = s;
4943 arch_spin_unlock(&trace_cmdline_lock);
4944 free_saved_cmdlines_buffer(savedcmd_temp);
4945
4946 return 0;
4947}
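/*
 * Note the resize pattern above: the replacement buffer is fully
 * allocated before the spinlock is taken, the global savedcmd pointer
 * is swapped while holding trace_cmdline_lock, and the old buffer is
 * freed only after the lock is dropped. Readers therefore always see
 * either the complete old buffer or the complete new one, never a
 * half-built one.
 */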
4948
4949static ssize_t
4950tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4951 size_t cnt, loff_t *ppos)
4952{
4953 unsigned long val;
4954 int ret;
4955
4956 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4957 if (ret)
4958 return ret;
4959
4960 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
4961 if (!val || val > PID_MAX_DEFAULT)
4962 return -EINVAL;
4963
4964 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4965 if (ret < 0)
4966 return ret;
4967
4968 *ppos += cnt;
4969
4970 return cnt;
4971}
4972
4973static const struct file_operations tracing_saved_cmdlines_size_fops = {
4974 .open = tracing_open_generic,
4975 .read = tracing_saved_cmdlines_size_read,
4976 .write = tracing_saved_cmdlines_size_write,
4977};
4978
681bec03 4979#ifdef CONFIG_TRACE_EVAL_MAP_FILE
23bf8cb8 4980static union trace_eval_map_item *
f57a4143 4981update_eval_map(union trace_eval_map_item *ptr)
9828413d 4982{
00f4b652 4983 if (!ptr->map.eval_string) {
9828413d
SRRH
4984 if (ptr->tail.next) {
4985 ptr = ptr->tail.next;
4986 /* Set ptr to the next real item (skip head) */
4987 ptr++;
4988 } else
4989 return NULL;
4990 }
4991 return ptr;
4992}
4993
f57a4143 4994static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
9828413d 4995{
23bf8cb8 4996 union trace_eval_map_item *ptr = v;
9828413d
SRRH
4997
4998 /*
4999 * Paranoid! If ptr points to end, we don't want to increment past it.
5000 * This really should never happen.
5001 */
f57a4143 5002 ptr = update_eval_map(ptr);
9828413d
SRRH
5003 if (WARN_ON_ONCE(!ptr))
5004 return NULL;
5005
5006 ptr++;
5007
5008 (*pos)++;
5009
f57a4143 5010 ptr = update_eval_map(ptr);
9828413d
SRRH
5011
5012 return ptr;
5013}
5014
f57a4143 5015static void *eval_map_start(struct seq_file *m, loff_t *pos)
9828413d 5016{
23bf8cb8 5017 union trace_eval_map_item *v;
9828413d
SRRH
5018 loff_t l = 0;
5019
1793ed93 5020 mutex_lock(&trace_eval_mutex);
9828413d 5021
23bf8cb8 5022 v = trace_eval_maps;
9828413d
SRRH
5023 if (v)
5024 v++;
5025
5026 while (v && l < *pos) {
f57a4143 5027 v = eval_map_next(m, v, &l);
9828413d
SRRH
5028 }
5029
5030 return v;
5031}
5032
f57a4143 5033static void eval_map_stop(struct seq_file *m, void *v)
9828413d 5034{
1793ed93 5035 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
5036}
5037
f57a4143 5038static int eval_map_show(struct seq_file *m, void *v)
9828413d 5039{
23bf8cb8 5040 union trace_eval_map_item *ptr = v;
9828413d
SRRH
5041
5042 seq_printf(m, "%s %ld (%s)\n",
00f4b652 5043 ptr->map.eval_string, ptr->map.eval_value,
9828413d
SRRH
5044 ptr->map.system);
5045
5046 return 0;
5047}
5048
f57a4143
JL
5049static const struct seq_operations tracing_eval_map_seq_ops = {
5050 .start = eval_map_start,
5051 .next = eval_map_next,
5052 .stop = eval_map_stop,
5053 .show = eval_map_show,
9828413d
SRRH
5054};
5055
f57a4143 5056static int tracing_eval_map_open(struct inode *inode, struct file *filp)
9828413d
SRRH
5057{
5058 if (tracing_disabled)
5059 return -ENODEV;
5060
f57a4143 5061 return seq_open(filp, &tracing_eval_map_seq_ops);
9828413d
SRRH
5062}
5063
f57a4143
JL
5064static const struct file_operations tracing_eval_map_fops = {
5065 .open = tracing_eval_map_open,
9828413d
SRRH
5066 .read = seq_read,
5067 .llseek = seq_lseek,
5068 .release = seq_release,
5069};
5070
23bf8cb8 5071static inline union trace_eval_map_item *
5f60b351 5072trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
9828413d
SRRH
5073{
5074 /* Return tail of array given the head */
5075 return ptr + ptr->head.length + 1;
5076}
5077
5078static void
f57a4143 5079trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
9828413d
SRRH
5080 int len)
5081{
00f4b652
JL
5082 struct trace_eval_map **stop;
5083 struct trace_eval_map **map;
23bf8cb8
JL
5084 union trace_eval_map_item *map_array;
5085 union trace_eval_map_item *ptr;
9828413d
SRRH
5086
5087 stop = start + len;
5088
5089 /*
23bf8cb8 5090 * The trace_eval_maps contains the map plus a head and tail item,
9828413d
SRRH
5091 * where the head holds the module and the length of the array, and the
5092 * tail holds a pointer to the next list.
5093 */
6da2ec56 5094 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
9828413d 5095 if (!map_array) {
f57a4143 5096 pr_warn("Unable to allocate trace eval mapping\n");
9828413d
SRRH
5097 return;
5098 }
5099
1793ed93 5100 mutex_lock(&trace_eval_mutex);
9828413d 5101
23bf8cb8
JL
5102 if (!trace_eval_maps)
5103 trace_eval_maps = map_array;
9828413d 5104 else {
23bf8cb8 5105 ptr = trace_eval_maps;
9828413d 5106 for (;;) {
5f60b351 5107 ptr = trace_eval_jmp_to_tail(ptr);
9828413d
SRRH
5108 if (!ptr->tail.next)
5109 break;
5110 ptr = ptr->tail.next;
5111
5112 }
5113 ptr->tail.next = map_array;
5114 }
5115 map_array->head.mod = mod;
5116 map_array->head.length = len;
5117 map_array++;
5118
5119 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5120 map_array->map = **map;
5121 map_array++;
5122 }
5123 memset(map_array, 0, sizeof(*map_array));
5124
1793ed93 5125 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
5126}
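/*
 * Layout sketch of the map_array built above (the head and tail items
 * are the two extras accounted for by the "len + 2" allocation):
 *
 *	[ head: mod, length ][ map 0 ][ map 1 ] ... [ map len-1 ][ tail: next ]
 *
 * trace_eval_jmp_to_tail() depends on exactly this shape when it skips
 * from a head item to its tail item (ptr + ptr->head.length + 1).
 */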
5127
f57a4143 5128static void trace_create_eval_file(struct dentry *d_tracer)
9828413d 5129{
681bec03 5130 trace_create_file("eval_map", 0444, d_tracer,
f57a4143 5131 NULL, &tracing_eval_map_fops);
9828413d
SRRH
5132}
5133
681bec03 5134#else /* CONFIG_TRACE_EVAL_MAP_FILE */
f57a4143
JL
5135static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5136static inline void trace_insert_eval_map_file(struct module *mod,
00f4b652 5137 struct trace_eval_map **start, int len) { }
681bec03 5138#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 5139
f57a4143 5140static void trace_insert_eval_map(struct module *mod,
00f4b652 5141 struct trace_eval_map **start, int len)
0c564a53 5142{
00f4b652 5143 struct trace_eval_map **map;
0c564a53
SRRH
5144
5145 if (len <= 0)
5146 return;
5147
5148 map = start;
5149
f57a4143 5150 trace_event_eval_update(map, len);
9828413d 5151
f57a4143 5152 trace_insert_eval_map_file(mod, start, len);
0c564a53
SRRH
5153}
5154
bc0c38d1
SR
5155static ssize_t
5156tracing_set_trace_read(struct file *filp, char __user *ubuf,
5157 size_t cnt, loff_t *ppos)
5158{
2b6080f2 5159 struct trace_array *tr = filp->private_data;
ee6c2c1b 5160 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
5161 int r;
5162
5163 mutex_lock(&trace_types_lock);
2b6080f2 5164 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
5165 mutex_unlock(&trace_types_lock);
5166
4bf39a94 5167 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
5168}
5169
b6f11df2
ACM
5170int tracer_init(struct tracer *t, struct trace_array *tr)
5171{
12883efb 5172 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
5173 return t->init(tr);
5174}
5175
12883efb 5176static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
5177{
5178 int cpu;
737223fb 5179
438ced17 5180 for_each_tracing_cpu(cpu)
12883efb 5181 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
5182}
5183
12883efb 5184#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 5185/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
5186static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5187 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
5188{
5189 int cpu, ret = 0;
5190
5191 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5192 for_each_tracing_cpu(cpu) {
12883efb
SRRH
5193 ret = ring_buffer_resize(trace_buf->buffer,
5194 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
5195 if (ret < 0)
5196 break;
12883efb
SRRH
5197 per_cpu_ptr(trace_buf->data, cpu)->entries =
5198 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
5199 }
5200 } else {
12883efb
SRRH
5201 ret = ring_buffer_resize(trace_buf->buffer,
5202 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 5203 if (ret == 0)
12883efb
SRRH
5204 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5205 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
5206 }
5207
5208 return ret;
5209}
12883efb 5210#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 5211
2b6080f2
SR
5212static int __tracing_resize_ring_buffer(struct trace_array *tr,
5213 unsigned long size, int cpu)
73c5162a
SR
5214{
5215 int ret;
5216
5217 /*
5218 * If the kernel or user changes the size of the ring buffer,
a123c52b
SR
5219 * we use the size that was given, and we can forget about
5220 * expanding it later.
73c5162a 5221 */
55034cd6 5222 ring_buffer_expanded = true;
73c5162a 5223
b382ede6 5224 /* May be called before buffers are initialized */
12883efb 5225 if (!tr->trace_buffer.buffer)
b382ede6
SR
5226 return 0;
5227
12883efb 5228 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
5229 if (ret < 0)
5230 return ret;
5231
12883efb 5232#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
5233 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5234 !tr->current_trace->use_max_tr)
ef710e10
KM
5235 goto out;
5236
12883efb 5237 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 5238 if (ret < 0) {
12883efb
SRRH
5239 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5240 &tr->trace_buffer, cpu);
73c5162a 5241 if (r < 0) {
a123c52b
SR
5242 /*
5243 * AARGH! We are left with different
5244 * size max buffer!!!!
5245 * The max buffer is our "snapshot" buffer.
5246 * When a tracer needs a snapshot (one of the
5247 * latency tracers), it swaps the max buffer
5248 * with the saved snap shot. We succeeded to
5249 * update the size of the main buffer, but failed to
5250 * update the size of the max buffer. But when we tried
5251 * to reset the main buffer to the original size, we
5252 * failed there too. This is very unlikely to
5253 * happen, but if it does, warn and kill all
5254 * tracing.
5255 */
73c5162a
SR
5256 WARN_ON(1);
5257 tracing_disabled = 1;
5258 }
5259 return ret;
5260 }
5261
438ced17 5262 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5263 set_buffer_entries(&tr->max_buffer, size);
438ced17 5264 else
12883efb 5265 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 5266
ef710e10 5267 out:
12883efb
SRRH
5268#endif /* CONFIG_TRACER_MAX_TRACE */
5269
438ced17 5270 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5271 set_buffer_entries(&tr->trace_buffer, size);
438ced17 5272 else
12883efb 5273 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
5274
5275 return ret;
5276}
5277
static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}


/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system with it
 * configured in, the ring buffers are set to a minimum size. Once
 * a user starts to use the tracing facility, they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}

static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers won't work on the kernel command line */
	if (system_state < SYSTEM_RUNNING && t->noboot) {
		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
			t->name);
		goto out;
	}

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * update_max_tr is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = tracing_alloc_snapshot_instance(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}
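
/*
 * Illustrative usage, not new behavior: the handler above backs the
 * "current_tracer" tracefs file, so switching tracers from userspace is
 * simply (assuming the conventional tracefs mount point):
 *
 *	echo function > /sys/kernel/tracing/current_tracer
 *
 * The trailing newline from echo is removed by the whitespace-stripping
 * loop above before the name is looked up in trace_types.
 */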

static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}
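
/*
 * A note worth making explicit: these helpers expose a nanosecond-resolution
 * variable in microseconds. The read side converts with nsecs_to_usecs() and
 * the write side multiplies the user value by 1000, so a write of "50" to a
 * file backed by them (tracing_thresh below, for example) means 50 usecs.
 */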

static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

#endif

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->current_trace->ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	/*
	 * iter->trace points at the live tr->current_trace here; it is not a
	 * private copy, so it must not be freed on the error path.
	 */
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static __poll_t
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return EPOLLIN | EPOLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return EPOLLIN | EPOLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static __poll_t
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, 0);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
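
/*
 * A brief illustration of the consuming semantics implemented above: unlike
 * the "trace" file, a read from "trace_pipe" removes what it returns, so
 *
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * behaves like a blocking FIFO of formatted events (the path assumes the
 * usual tracefs mount). Concurrent readers of one descriptor are serialized
 * by iter->mutex rather than each receiving a copy.
 */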

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
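
/*
 * Illustration of the units handled above: the written value is in KB and
 * is shifted into bytes before the resize, so growing the buffer to 4 MB
 * per CPU is, on a conventional tracefs mount,
 *
 *	echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 * (these fops are wired to the buffer_size_kb file later in this file).
 */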

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	enum event_trigger_type tt = ETT_NONE;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	const char faulted[] = "<faulted>";
	ssize_t written;
	int size;
	int len;

/* Used in tracing_mark_raw_write() as well */
#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */

	/* If less than "<faulted>", then make sure we can still add that */
	if (cnt < FAULTED_SIZE)
		size += FAULTED_SIZE - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    irq_flags, preempt_count());
	if (unlikely(!event))
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
	if (len) {
		memcpy(&entry->buf, faulted, FAULTED_SIZE);
		cnt = FAULTED_SIZE;
		written = -EFAULT;
	} else
		written = cnt;
	len = cnt;

	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
		/* do not add \n before testing triggers, but add \0 */
		entry->buf[cnt] = '\0';
		tt = event_triggers_call(tr->trace_marker_file, entry, event);
	}

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	if (tt)
		event_triggers_post_call(tr->trace_marker_file, tt);

	if (written > 0)
		*fpos += written;

	return written;
}
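
/*
 * For illustration: the handler above backs the "trace_marker" file, so
 * annotating a trace from a shell or an application is a plain write, e.g.
 *
 *	echo "frame start" > /sys/kernel/tracing/trace_marker
 *
 * The text is recorded as a TRACE_PRINT entry; if the copy from user space
 * faults, the literal "<faulted>" is logged instead, as set up above.
 */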

/* Limit it for now to 3K (including tag) */
#define RAW_DATA_MAX_SIZE (1024*3)

static ssize_t
tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct raw_data_entry *entry;
	const char faulted[] = "<faulted>";
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	/* The marker must at least have a tag id */
	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt;
	if (cnt < FAULT_SIZE_ID)
		size += FAULT_SIZE_ID - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
					    irq_flags, preempt_count());
	if (!event)
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);

	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
	if (len) {
		entry->id = -1;
		memcpy(&entry->buf, faulted, FAULTED_SIZE);
		written = -EFAULT;
	} else
		written = cnt;

	__buffer_unlock_commit(buffer, event);

	if (written > 0)
		*fpos += written;

	return written;
}
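
/*
 * A sketch of the format the raw handler expects, derived from the checks
 * above rather than any additional contract: the payload must begin with a
 * 4-byte tag id, optionally followed by binary data. From userspace,
 * something like:
 *
 *	struct { unsigned int id; char data[8]; } m = { 42, "payload" };
 *	write(fd, &m, sizeof(m));	// fd open on trace_marker_raw
 *
 * Writes shorter than sizeof(unsigned int) or larger than RAW_DATA_MAX_SIZE
 * are rejected with -EINVAL.
 */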

static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}
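
/*
 * Usage illustration (the clock names come from the trace_clocks[] table
 * defined earlier in this file): reading "trace_clock" shows the current
 * selection in brackets, and switching is a plain write, e.g.
 *
 *	echo global > /sys/kernel/tracing/trace_clock
 *
 * As tracing_set_clock() above shows, changing the clock resets the buffers,
 * because timestamps taken with different clocks are not comparable.
 */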

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;

	mutex_lock(&trace_types_lock);

	if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
		seq_puts(m, "delta [absolute]\n");
	else
		seq_puts(m, "[delta] absolute\n");

	mutex_unlock(&trace_types_lock);

	return 0;
}

static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (abs && tr->time_stamp_abs_ref++)
		goto out;

	if (!abs) {
		if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
			ret = -EINVAL;
			goto out;
		}

		if (--tr->time_stamp_abs_ref)
			goto out;
	}

	ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
#endif
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		spare_cpu;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = tracing_alloc_snapshot_instance(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
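
/*
 * The switch above gives the snapshot file its semantics; as an
 * illustration on a conventional tracefs mount:
 *
 *	echo 0 > /sys/kernel/tracing/snapshot	(free the snapshot buffer)
 *	echo 1 > /sys/kernel/tracing/snapshot	(allocate if needed and swap)
 *	echo 2 > /sys/kernel/tracing/snapshot	(clear it; any value > 1 does)
 *
 * The swap itself runs with interrupts disabled so it cannot race with a
 * latency tracer's own update_max_tr() swap.
 */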

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_mark_raw_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_raw_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

static const struct file_operations trace_time_stamp_mode_fops = {
	.open		= tracing_time_stamp_mode_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr		= tr;
	info->iter.cpu_file	= tracing_get_cpu(inode);
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static __poll_t
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret = 0;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare) {
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
		if (IS_ERR(info->spare)) {
			ret = PTR_ERR(info->spare);
			info->spare = NULL;
		} else {
			info->spare_cpu = iter->cpu_file;
		}
	}
	if (!info->spare)
		return ret;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, 0);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer,
					   info->spare_cpu, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			cpu;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (IS_ERR(ref->page)) {
			ret = PTR_ERR(ref->page);
			ref->page = NULL;
			kfree(ref);
			break;
		}
		ref->cpu = iter->cpu_file;

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->cpu,
						   ref->page);
			kfree(ref);
			break;
		}

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}
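
/*
 * A design note on the splice path above: rather than formatting events as
 * text, whole ring-buffer pages are handed to the pipe by reference via
 * struct buffer_ref, so a consumer that does, say,
 *
 *	splice(fd, NULL, pipefd, NULL, 64 * 1024, 0);
 *
 * on a per_cpu/cpuN/trace_pipe_raw descriptor moves binary trace data
 * without copying; a page returns to the ring buffer only when the last
 * pipe reference to it is dropped.
 */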

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}
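
/*
 * For reference, the read above produces one "key: value" line per counter,
 * along the lines of (values invented for illustration):
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 53280
 *	oldest event ts:  5234.000671
 *	now ts:  5238.326972
 *	dropped events: 0
 *	read events: 512
 *
 * It backs the per-CPU "stats" files created elsewhere in this file.
 */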

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64]; /* Not too big for a shallow stack */
	int r;

	r = scnprintf(buf, 63, "%ld", *p);
	buf[r++] = '\n';

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	tracing_snapshot_instance(tr);
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {

		if (*count <= 0)
			return;

		(*count)--;
	}

	tracing_snapshot_instance(tr);
}
7110
7111static int
7112ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7113 struct ftrace_probe_ops *ops, void *data)
7114{
6e444319 7115 struct ftrace_func_mapper *mapper = data;
1a93f8bd 7116 long *count = NULL;
77fd5c15
SRRH
7117
7118 seq_printf(m, "%ps:", (void *)ip);
7119
fa6f0cc7 7120 seq_puts(m, "snapshot");
77fd5c15 7121
1a93f8bd
SRV
7122 if (mapper)
7123 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7124
7125 if (count)
7126 seq_printf(m, ":count=%ld\n", *count);
77fd5c15 7127 else
1a93f8bd 7128 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
7129
7130 return 0;
7131}
7132
1a93f8bd 7133static int
b5f081b5 7134ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 7135 unsigned long ip, void *init_data, void **data)
1a93f8bd 7136{
6e444319
SRV
7137 struct ftrace_func_mapper *mapper = *data;
7138
7139 if (!mapper) {
7140 mapper = allocate_ftrace_func_mapper();
7141 if (!mapper)
7142 return -ENOMEM;
7143 *data = mapper;
7144 }
1a93f8bd 7145
6e444319 7146 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
1a93f8bd
SRV
7147}
7148
7149static void
b5f081b5 7150ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 7151 unsigned long ip, void *data)
1a93f8bd 7152{
6e444319
SRV
7153 struct ftrace_func_mapper *mapper = data;
7154
7155 if (!ip) {
7156 if (!mapper)
7157 return;
7158 free_ftrace_func_mapper(mapper, NULL);
7159 return;
7160 }
1a93f8bd
SRV
7161
7162 ftrace_func_mapper_remove_ip(mapper, ip);
7163}
7164
77fd5c15
SRRH
7165static struct ftrace_probe_ops snapshot_probe_ops = {
7166 .func = ftrace_snapshot,
7167 .print = ftrace_snapshot_print,
7168};
7169
7170static struct ftrace_probe_ops snapshot_count_probe_ops = {
7171 .func = ftrace_count_snapshot,
7172 .print = ftrace_snapshot_print,
1a93f8bd
SRV
7173 .init = ftrace_snapshot_init,
7174 .free = ftrace_snapshot_free,
77fd5c15
SRRH
7175};
7176
7177static int
04ec7bb6 7178ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
77fd5c15
SRRH
7179 char *glob, char *cmd, char *param, int enable)
7180{
7181 struct ftrace_probe_ops *ops;
7182 void *count = (void *)-1;
7183 char *number;
7184 int ret;
7185
0f179765
SRV
7186 if (!tr)
7187 return -ENODEV;
7188
77fd5c15
SRRH
7189 /* hash funcs only work with set_ftrace_filter */
7190 if (!enable)
7191 return -EINVAL;
7192
7193 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7194
d3d532d7 7195 if (glob[0] == '!')
7b60f3d8 7196 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
77fd5c15
SRRH
7197
7198 if (!param)
7199 goto out_reg;
7200
7201 number = strsep(&param, ":");
7202
7203 if (!strlen(number))
7204 goto out_reg;
7205
7206 /*
7207 * We use the callback data field (which is a pointer)
7208 * as our counter.
7209 */
7210 ret = kstrtoul(number, 0, (unsigned long *)&count);
7211 if (ret)
7212 return ret;
7213
7214 out_reg:
2824f503 7215 ret = tracing_alloc_snapshot_instance(tr);
df62db5b
SRV
7216 if (ret < 0)
7217 goto out;
77fd5c15 7218
4c174688 7219 ret = register_ftrace_function_probe(glob, tr, ops, count);
77fd5c15 7220
df62db5b 7221 out:
77fd5c15
SRRH
7222 return ret < 0 ? ret : 0;
7223}
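/*
 * Usage sketch (see Documentation/trace/ftrace.rst; the function name
 * here is just an example):
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter    # snapshot on every hit
 *	echo 'schedule:snapshot:5' > set_ftrace_filter  # only the first 5 hits
 *	echo '!schedule:snapshot' > set_ftrace_filter   # remove the probe
 *
 * With the ":5" form, the count parsed above is carried in the probe's
 * data pointer and decremented by ftrace_count_snapshot() on each hit.
 */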
7224
7225static struct ftrace_func_command ftrace_snapshot_cmd = {
7226 .name = "snapshot",
7227 .func = ftrace_trace_snapshot_callback,
7228};
7229
38de93ab 7230static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
7231{
7232 return register_ftrace_command(&ftrace_snapshot_cmd);
7233}
7234#else
38de93ab 7235static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 7236#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 7237
7eeafbca 7238static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 7239{
8434dc93
SRRH
7240 if (WARN_ON(!tr->dir))
7241 return ERR_PTR(-ENODEV);
7242
7243 /* Top directory uses NULL as the parent */
7244 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7245 return NULL;
7246
7247 /* All sub buffers have a descriptor */
2b6080f2 7248 return tr->dir;
bc0c38d1
SR
7249}
7250
2b6080f2 7251static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 7252{
b04cc6b1
FW
7253 struct dentry *d_tracer;
7254
2b6080f2
SR
7255 if (tr->percpu_dir)
7256 return tr->percpu_dir;
b04cc6b1 7257
7eeafbca 7258 d_tracer = tracing_get_dentry(tr);
14a5ae40 7259 if (IS_ERR(d_tracer))
b04cc6b1
FW
7260 return NULL;
7261
8434dc93 7262 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 7263
2b6080f2 7264 WARN_ONCE(!tr->percpu_dir,
8434dc93 7265 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 7266
2b6080f2 7267 return tr->percpu_dir;
b04cc6b1
FW
7268}
7269
649e9c70
ON
7270static struct dentry *
7271trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7272 void *data, long cpu, const struct file_operations *fops)
7273{
7274 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7275
7276 if (ret) /* See tracing_get_cpu() */
7682c918 7277 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
7278 return ret;
7279}
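/*
 * The inverse decoding is done by tracing_get_cpu() (defined in
 * trace.h); a sketch of the idea, with the cpu + 1 encoding keeping
 * CPU 0 distinguishable from an unset i_cdev:
 *
 *	static inline int tracing_get_cpu(struct inode *inode)
 *	{
 *		if (inode->i_cdev)
 *			return (long)inode->i_cdev - 1;
 *		return RING_BUFFER_ALL_CPUS;
 *	}
 */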
7280
2b6080f2 7281static void
8434dc93 7282tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 7283{
2b6080f2 7284 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 7285 struct dentry *d_cpu;
dd49a38c 7286 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 7287
0a3d7ce7
NK
7288 if (!d_percpu)
7289 return;
7290
dd49a38c 7291 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 7292 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 7293 if (!d_cpu) {
a395d6a7 7294 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
7295 return;
7296 }
b04cc6b1 7297
8656e7a2 7298 /* per cpu trace_pipe */
649e9c70 7299 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 7300 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
7301
7302 /* per cpu trace */
649e9c70 7303 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 7304 tr, cpu, &tracing_fops);
7f96f93f 7305
649e9c70 7306 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 7307 tr, cpu, &tracing_buffers_fops);
7f96f93f 7308
649e9c70 7309 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 7310 tr, cpu, &tracing_stats_fops);
438ced17 7311
649e9c70 7312 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 7313 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
7314
7315#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 7316 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 7317 tr, cpu, &snapshot_fops);
6de58e62 7318
649e9c70 7319 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 7320 tr, cpu, &snapshot_raw_fops);
f1affcaa 7321#endif
b04cc6b1
FW
7322}
7323
60a11774
SR
7324#ifdef CONFIG_FTRACE_SELFTEST
7325/* Let selftest have access to static functions in this file */
7326#include "trace_selftest.c"
7327#endif
7328
577b785f
SR
7329static ssize_t
7330trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7331 loff_t *ppos)
7332{
7333 struct trace_option_dentry *topt = filp->private_data;
7334 char *buf;
7335
7336 if (topt->flags->val & topt->opt->bit)
7337 buf = "1\n";
7338 else
7339 buf = "0\n";
7340
7341 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7342}
7343
7344static ssize_t
7345trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7346 loff_t *ppos)
7347{
7348 struct trace_option_dentry *topt = filp->private_data;
7349 unsigned long val;
577b785f
SR
7350 int ret;
7351
22fe9b54
PH
7352 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7353 if (ret)
577b785f
SR
7354 return ret;
7355
8d18eaaf
LZ
7356 if (val != 0 && val != 1)
7357 return -EINVAL;
577b785f 7358
8d18eaaf 7359 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 7360 mutex_lock(&trace_types_lock);
8c1a49ae 7361 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 7362 topt->opt, !val);
577b785f
SR
7363 mutex_unlock(&trace_types_lock);
7364 if (ret)
7365 return ret;
577b785f
SR
7366 }
7367
7368 *ppos += cnt;
7369
7370 return cnt;
7371}
7372
7373
7374static const struct file_operations trace_options_fops = {
7375 .open = tracing_open_generic,
7376 .read = trace_options_read,
7377 .write = trace_options_write,
b444786f 7378 .llseek = generic_file_llseek,
577b785f
SR
7379};
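/*
 * From userspace each tracer option is a boolean file under "options",
 * e.g. the function tracer's func_stack_trace (illustrative):
 *
 *	cat options/func_stack_trace		# prints "0" or "1"
 *	echo 1 > options/func_stack_trace	# anything but 0/1 -> -EINVAL
 */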
7380
9a38a885
SRRH
7381/*
7382 * In order to pass in both the trace_array descriptor as well as the index
7383 * to the flag that the trace option file represents, the trace_array
7384 * has a character array of trace_flags_index[], which holds the index
7385 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7386 * The address of the element for a given flag is passed to that flag's
7387 * option file read/write callbacks.
7388 *
7389 * In order to extract both the index and the trace_array descriptor,
7390 * get_tr_index() uses the following algorithm.
7391 *
7392 * idx = *ptr;
7393 *
7394 * The pointer itself is the address of index[idx], and since
7395 * index[idx] == idx, dereferencing it yields the index directly.
7396 *
7397 * Then, to get the trace_array descriptor, we subtract that index
7398 * from the ptr, which lands us at the start of the index array:
7399 *
7400 * ptr - idx == &index[0]
7401 *
7402 * Then a simple container_of() from that pointer gets us to the
7403 * trace_array descriptor.
7404 */
7405static void get_tr_index(void *data, struct trace_array **ptr,
7406 unsigned int *pindex)
7407{
7408 *pindex = *(unsigned char *)data;
7409
7410 *ptr = container_of(data - *pindex, struct trace_array,
7411 trace_flags_index);
7412}
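/*
 * Worked example (illustrative): if an option file was created with
 * data == &tr->trace_flags_index[3], then:
 *
 *	*pindex = *(unsigned char *)data;	// 3, since index[3] == 3
 *	data - *pindex				// == &tr->trace_flags_index[0]
 *	container_of(...)			// recovers tr itself
 */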
7413
a8259075
SR
7414static ssize_t
7415trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7416 loff_t *ppos)
7417{
9a38a885
SRRH
7418 void *tr_index = filp->private_data;
7419 struct trace_array *tr;
7420 unsigned int index;
a8259075
SR
7421 char *buf;
7422
9a38a885
SRRH
7423 get_tr_index(tr_index, &tr, &index);
7424
7425 if (tr->trace_flags & (1 << index))
a8259075
SR
7426 buf = "1\n";
7427 else
7428 buf = "0\n";
7429
7430 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7431}
7432
7433static ssize_t
7434trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7435 loff_t *ppos)
7436{
9a38a885
SRRH
7437 void *tr_index = filp->private_data;
7438 struct trace_array *tr;
7439 unsigned int index;
a8259075
SR
7440 unsigned long val;
7441 int ret;
7442
9a38a885
SRRH
7443 get_tr_index(tr_index, &tr, &index);
7444
22fe9b54
PH
7445 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7446 if (ret)
a8259075
SR
7447 return ret;
7448
f2d84b65 7449 if (val != 0 && val != 1)
a8259075 7450 return -EINVAL;
69d34da2
SRRH
7451
7452 mutex_lock(&trace_types_lock);
2b6080f2 7453 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 7454 mutex_unlock(&trace_types_lock);
a8259075 7455
613f04a0
SRRH
7456 if (ret < 0)
7457 return ret;
7458
a8259075
SR
7459 *ppos += cnt;
7460
7461 return cnt;
7462}
7463
a8259075
SR
7464static const struct file_operations trace_options_core_fops = {
7465 .open = tracing_open_generic,
7466 .read = trace_options_core_read,
7467 .write = trace_options_core_write,
b444786f 7468 .llseek = generic_file_llseek,
a8259075
SR
7469};
7470
5452af66 7471struct dentry *trace_create_file(const char *name,
f4ae40a6 7472 umode_t mode,
5452af66
FW
7473 struct dentry *parent,
7474 void *data,
7475 const struct file_operations *fops)
7476{
7477 struct dentry *ret;
7478
8434dc93 7479 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 7480 if (!ret)
a395d6a7 7481 pr_warn("Could not create tracefs '%s' entry\n", name);
5452af66
FW
7482
7483 return ret;
7484}
7485
7486
2b6080f2 7487static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
7488{
7489 struct dentry *d_tracer;
a8259075 7490
2b6080f2
SR
7491 if (tr->options)
7492 return tr->options;
a8259075 7493
7eeafbca 7494 d_tracer = tracing_get_dentry(tr);
14a5ae40 7495 if (IS_ERR(d_tracer))
a8259075
SR
7496 return NULL;
7497
8434dc93 7498 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 7499 if (!tr->options) {
a395d6a7 7500 pr_warn("Could not create tracefs directory 'options'\n");
a8259075
SR
7501 return NULL;
7502 }
7503
2b6080f2 7504 return tr->options;
a8259075
SR
7505}
7506
577b785f 7507static void
2b6080f2
SR
7508create_trace_option_file(struct trace_array *tr,
7509 struct trace_option_dentry *topt,
577b785f
SR
7510 struct tracer_flags *flags,
7511 struct tracer_opt *opt)
7512{
7513 struct dentry *t_options;
577b785f 7514
2b6080f2 7515 t_options = trace_options_init_dentry(tr);
577b785f
SR
7516 if (!t_options)
7517 return;
7518
7519 topt->flags = flags;
7520 topt->opt = opt;
2b6080f2 7521 topt->tr = tr;
577b785f 7522
5452af66 7523 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
7524 &trace_options_fops);
7525
577b785f
SR
7526}
7527
37aea98b 7528static void
2b6080f2 7529create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
7530{
7531 struct trace_option_dentry *topts;
37aea98b 7532 struct trace_options *tr_topts;
577b785f
SR
7533 struct tracer_flags *flags;
7534 struct tracer_opt *opts;
7535 int cnt;
37aea98b 7536 int i;
577b785f
SR
7537
7538 if (!tracer)
37aea98b 7539 return;
577b785f
SR
7540
7541 flags = tracer->flags;
7542
7543 if (!flags || !flags->opts)
37aea98b
SRRH
7544 return;
7545
7546 /*
7547 * If this is an instance, only create flags for tracers
7548 * the instance may have.
7549 */
7550 if (!trace_ok_for_array(tracer, tr))
7551 return;
7552
7553 for (i = 0; i < tr->nr_topts; i++) {
d39cdd20
CH
7554 /* Make sure there are no duplicate flags. */
7555 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
37aea98b
SRRH
7556 return;
7557 }
577b785f
SR
7558
7559 opts = flags->opts;
7560
7561 for (cnt = 0; opts[cnt].name; cnt++)
7562 ;
7563
0cfe8245 7564 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f 7565 if (!topts)
37aea98b
SRRH
7566 return;
7567
7568 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7569 GFP_KERNEL);
7570 if (!tr_topts) {
7571 kfree(topts);
7572 return;
7573 }
7574
7575 tr->topts = tr_topts;
7576 tr->topts[tr->nr_topts].tracer = tracer;
7577 tr->topts[tr->nr_topts].topts = topts;
7578 tr->nr_topts++;
577b785f 7579
41d9c0be 7580 for (cnt = 0; opts[cnt].name; cnt++) {
2b6080f2 7581 create_trace_option_file(tr, &topts[cnt], flags,
577b785f 7582 &opts[cnt]);
41d9c0be
SRRH
7583 WARN_ONCE(topts[cnt].entry == NULL,
7584 "Failed to create trace option: %s",
7585 opts[cnt].name);
7586 }
577b785f
SR
7587}
7588
a8259075 7589static struct dentry *
2b6080f2
SR
7590create_trace_option_core_file(struct trace_array *tr,
7591 const char *option, long index)
a8259075
SR
7592{
7593 struct dentry *t_options;
a8259075 7594
2b6080f2 7595 t_options = trace_options_init_dentry(tr);
a8259075
SR
7596 if (!t_options)
7597 return NULL;
7598
9a38a885
SRRH
7599 return trace_create_file(option, 0644, t_options,
7600 (void *)&tr->trace_flags_index[index],
7601 &trace_options_core_fops);
a8259075
SR
7602}
7603
16270145 7604static void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
7605{
7606 struct dentry *t_options;
16270145 7607 bool top_level = tr == &global_trace;
a8259075
SR
7608 int i;
7609
2b6080f2 7610 t_options = trace_options_init_dentry(tr);
a8259075
SR
7611 if (!t_options)
7612 return;
7613
16270145
SRRH
7614 for (i = 0; trace_options[i]; i++) {
7615 if (top_level ||
7616 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7617 create_trace_option_core_file(tr, trace_options[i], i);
7618 }
a8259075
SR
7619}
7620
499e5470
SR
7621static ssize_t
7622rb_simple_read(struct file *filp, char __user *ubuf,
7623 size_t cnt, loff_t *ppos)
7624{
348f0fc2 7625 struct trace_array *tr = filp->private_data;
499e5470
SR
7626 char buf[64];
7627 int r;
7628
10246fa3 7629 r = tracer_tracing_is_on(tr);
499e5470
SR
7630 r = sprintf(buf, "%d\n", r);
7631
7632 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7633}
7634
7635static ssize_t
7636rb_simple_write(struct file *filp, const char __user *ubuf,
7637 size_t cnt, loff_t *ppos)
7638{
348f0fc2 7639 struct trace_array *tr = filp->private_data;
12883efb 7640 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
7641 unsigned long val;
7642 int ret;
7643
7644 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7645 if (ret)
7646 return ret;
7647
7648 if (buffer) {
2df8f8a6 7649 mutex_lock(&trace_types_lock);
f143641b
SRV
7650 if (!!val == tracer_tracing_is_on(tr)) {
7651 val = 0; /* do nothing */
7652 } else if (val) {
10246fa3 7653 tracer_tracing_on(tr);
2b6080f2
SR
7654 if (tr->current_trace->start)
7655 tr->current_trace->start(tr);
2df8f8a6 7656 } else {
10246fa3 7657 tracer_tracing_off(tr);
2b6080f2
SR
7658 if (tr->current_trace->stop)
7659 tr->current_trace->stop(tr);
2df8f8a6
SR
7660 }
7661 mutex_unlock(&trace_types_lock);
499e5470
SR
7662 }
7663
7664 (*ppos)++;
7665
7666 return cnt;
7667}
7668
7669static const struct file_operations rb_simple_fops = {
7b85af63 7670 .open = tracing_open_generic_tr,
499e5470
SR
7671 .read = rb_simple_read,
7672 .write = rb_simple_write,
7b85af63 7673 .release = tracing_release_generic_tr,
499e5470
SR
7674 .llseek = default_llseek,
7675};
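/*
 * Illustration of the "tracing_on" file these fops back:
 *
 *	cat tracing_on		# prints tracer_tracing_is_on()
 *	echo 0 > tracing_on	# tracer_tracing_off() + current_trace->stop()
 *	echo 1 > tracing_on	# tracer_tracing_on() + current_trace->start()
 *
 * Writing the value that is already set is a no-op (the "do nothing"
 * branch above).
 */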
7676
03329f99
SRV
7677static ssize_t
7678buffer_percent_read(struct file *filp, char __user *ubuf,
7679 size_t cnt, loff_t *ppos)
7680{
7681 struct trace_array *tr = filp->private_data;
7682 char buf[64];
7683 int r;
7684
7685 r = tr->buffer_percent;
7686 r = sprintf(buf, "%d\n", r);
7687
7688 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7689}
7690
7691static ssize_t
7692buffer_percent_write(struct file *filp, const char __user *ubuf,
7693 size_t cnt, loff_t *ppos)
7694{
7695 struct trace_array *tr = filp->private_data;
7696 unsigned long val;
7697 int ret;
7698
7699 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7700 if (ret)
7701 return ret;
7702
7703 if (val > 100)
7704 return -EINVAL;
7705
7706 if (!val)
7707 val = 1;
7708
7709 tr->buffer_percent = val;
7710
7711 (*ppos)++;
7712
7713 return cnt;
7714}
7715
7716static const struct file_operations buffer_percent_fops = {
7717 .open = tracing_open_generic_tr,
7718 .read = buffer_percent_read,
7719 .write = buffer_percent_write,
7720 .release = tracing_release_generic_tr,
7721 .llseek = default_llseek,
7722};
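/*
 * Illustration: "buffer_percent" is roughly how full the ring buffer
 * should be before blocked readers are woken:
 *
 *	echo 50 > buffer_percent	# wake readers at half full (default)
 *	echo 0 > buffer_percent		# stored as 1 (see the !val check)
 *	echo 101 > buffer_percent	# rejected with -EINVAL
 */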
7723
277ba044
SR
7724struct dentry *trace_instance_dir;
7725
7726static void
8434dc93 7727init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 7728
55034cd6
SRRH
7729static int
7730allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
7731{
7732 enum ring_buffer_flags rb_flags;
737223fb 7733
983f938a 7734 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
737223fb 7735
dced341b
SRRH
7736 buf->tr = tr;
7737
55034cd6
SRRH
7738 buf->buffer = ring_buffer_alloc(size, rb_flags);
7739 if (!buf->buffer)
7740 return -ENOMEM;
737223fb 7741
55034cd6
SRRH
7742 buf->data = alloc_percpu(struct trace_array_cpu);
7743 if (!buf->data) {
7744 ring_buffer_free(buf->buffer);
4397f045 7745 buf->buffer = NULL;
55034cd6
SRRH
7746 return -ENOMEM;
7747 }
737223fb 7748
737223fb
SRRH
7749 /* Allocate the first page for all buffers */
7750 set_buffer_entries(&tr->trace_buffer,
7751 ring_buffer_size(tr->trace_buffer.buffer, 0));
7752
55034cd6
SRRH
7753 return 0;
7754}
737223fb 7755
55034cd6
SRRH
7756static int allocate_trace_buffers(struct trace_array *tr, int size)
7757{
7758 int ret;
737223fb 7759
55034cd6
SRRH
7760 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7761 if (ret)
7762 return ret;
737223fb 7763
55034cd6
SRRH
7764#ifdef CONFIG_TRACER_MAX_TRACE
7765 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7766 allocate_snapshot ? size : 1);
7767 if (WARN_ON(ret)) {
737223fb 7768 ring_buffer_free(tr->trace_buffer.buffer);
24f2aaf9 7769 tr->trace_buffer.buffer = NULL;
55034cd6 7770 free_percpu(tr->trace_buffer.data);
24f2aaf9 7771 tr->trace_buffer.data = NULL;
55034cd6
SRRH
7772 return -ENOMEM;
7773 }
7774 tr->allocated_snapshot = allocate_snapshot;
737223fb 7775
55034cd6
SRRH
7776 /*
7777 * Only the top level trace array gets its snapshot allocated
7778 * from the kernel command line.
7779 */
7780 allocate_snapshot = false;
737223fb 7781#endif
55034cd6 7782 return 0;
737223fb
SRRH
7783}
7784
f0b70cc4
SRRH
7785static void free_trace_buffer(struct trace_buffer *buf)
7786{
7787 if (buf->buffer) {
7788 ring_buffer_free(buf->buffer);
7789 buf->buffer = NULL;
7790 free_percpu(buf->data);
7791 buf->data = NULL;
7792 }
7793}
7794
23aaa3c1
SRRH
7795static void free_trace_buffers(struct trace_array *tr)
7796{
7797 if (!tr)
7798 return;
7799
f0b70cc4 7800 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
7801
7802#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 7803 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
7804#endif
7805}
7806
9a38a885
SRRH
7807static void init_trace_flags_index(struct trace_array *tr)
7808{
7809 int i;
7810
7811 /* Used by the trace options files */
7812 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7813 tr->trace_flags_index[i] = i;
7814}
7815
37aea98b
SRRH
7816static void __update_tracer_options(struct trace_array *tr)
7817{
7818 struct tracer *t;
7819
7820 for (t = trace_types; t; t = t->next)
7821 add_tracer_options(tr, t);
7822}
7823
7824static void update_tracer_options(struct trace_array *tr)
7825{
7826 mutex_lock(&trace_types_lock);
7827 __update_tracer_options(tr);
7828 mutex_unlock(&trace_types_lock);
7829}
7830
eae47358 7831static int instance_mkdir(const char *name)
737223fb 7832{
277ba044
SR
7833 struct trace_array *tr;
7834 int ret;
277ba044 7835
12ecef0c 7836 mutex_lock(&event_mutex);
277ba044
SR
7837 mutex_lock(&trace_types_lock);
7838
7839 ret = -EEXIST;
7840 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7841 if (tr->name && strcmp(tr->name, name) == 0)
7842 goto out_unlock;
7843 }
7844
7845 ret = -ENOMEM;
7846 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7847 if (!tr)
7848 goto out_unlock;
7849
7850 tr->name = kstrdup(name, GFP_KERNEL);
7851 if (!tr->name)
7852 goto out_free_tr;
7853
ccfe9e42
AL
7854 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7855 goto out_free_tr;
7856
20550622 7857 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
983f938a 7858
ccfe9e42
AL
7859 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7860
277ba044
SR
7861 raw_spin_lock_init(&tr->start_lock);
7862
0b9b12c1
SRRH
7863 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7864
277ba044
SR
7865 tr->current_trace = &nop_trace;
7866
7867 INIT_LIST_HEAD(&tr->systems);
7868 INIT_LIST_HEAD(&tr->events);
067fe038 7869 INIT_LIST_HEAD(&tr->hist_vars);
277ba044 7870
737223fb 7871 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
7872 goto out_free_tr;
7873
8434dc93 7874 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
7875 if (!tr->dir)
7876 goto out_free_tr;
7877
7878 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 7879 if (ret) {
8434dc93 7880 tracefs_remove_recursive(tr->dir);
277ba044 7881 goto out_free_tr;
609e85a7 7882 }
277ba044 7883
04ec7bb6
SRV
7884 ftrace_init_trace_array(tr);
7885
8434dc93 7886 init_tracer_tracefs(tr, tr->dir);
9a38a885 7887 init_trace_flags_index(tr);
37aea98b 7888 __update_tracer_options(tr);
277ba044
SR
7889
7890 list_add(&tr->list, &ftrace_trace_arrays);
7891
7892 mutex_unlock(&trace_types_lock);
12ecef0c 7893 mutex_unlock(&event_mutex);
277ba044
SR
7894
7895 return 0;
7896
7897 out_free_tr:
23aaa3c1 7898 free_trace_buffers(tr);
ccfe9e42 7899 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
7900 kfree(tr->name);
7901 kfree(tr);
7902
7903 out_unlock:
7904 mutex_unlock(&trace_types_lock);
12ecef0c 7905 mutex_unlock(&event_mutex);
277ba044
SR
7906
7907 return ret;
7908
7909}
7910
eae47358 7911static int instance_rmdir(const char *name)
0c8916c3
SR
7912{
7913 struct trace_array *tr;
7914 int found = 0;
7915 int ret;
37aea98b 7916 int i;
0c8916c3 7917
12ecef0c 7918 mutex_lock(&event_mutex);
0c8916c3
SR
7919 mutex_lock(&trace_types_lock);
7920
7921 ret = -ENODEV;
7922 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7923 if (tr->name && strcmp(tr->name, name) == 0) {
7924 found = 1;
7925 break;
7926 }
7927 }
7928 if (!found)
7929 goto out_unlock;
7930
a695cb58 7931 ret = -EBUSY;
cf6ab6d9 7932 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
7933 goto out_unlock;
7934
0c8916c3
SR
7935 list_del(&tr->list);
7936
20550622
SRRH
7937 /* Disable all the flags that were enabled coming in */
7938 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7939 if ((1 << i) & ZEROED_TRACE_FLAGS)
7940 set_tracer_flag(tr, 1 << i, 0);
7941 }
7942
6b450d25 7943 tracing_set_nop(tr);
a0e6369e 7944 clear_ftrace_function_probes(tr);
0c8916c3 7945 event_trace_del_tracer(tr);
d879d0b8 7946 ftrace_clear_pids(tr);
591dffda 7947 ftrace_destroy_function_files(tr);
681a4a2f 7948 tracefs_remove_recursive(tr->dir);
a9fcaaac 7949 free_trace_buffers(tr);
0c8916c3 7950
37aea98b
SRRH
7951 for (i = 0; i < tr->nr_topts; i++) {
7952 kfree(tr->topts[i].topts);
7953 }
7954 kfree(tr->topts);
7955
db9108e0 7956 free_cpumask_var(tr->tracing_cpumask);
0c8916c3
SR
7957 kfree(tr->name);
7958 kfree(tr);
7959
7960 ret = 0;
7961
7962 out_unlock:
7963 mutex_unlock(&trace_types_lock);
12ecef0c 7964 mutex_unlock(&event_mutex);
0c8916c3
SR
7965
7966 return ret;
7967}
7968
277ba044
SR
7969static __init void create_trace_instances(struct dentry *d_tracer)
7970{
eae47358
SRRH
7971 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7972 instance_mkdir,
7973 instance_rmdir);
277ba044
SR
7974 if (WARN_ON(!trace_instance_dir))
7975 return;
277ba044
SR
7976}
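/*
 * Illustration: instances are managed with plain mkdir/rmdir inside
 * the "instances" directory; each gets its own buffers and files:
 *
 *	mkdir /sys/kernel/tracing/instances/foo   # -> instance_mkdir("foo")
 *	rmdir /sys/kernel/tracing/instances/foo   # -> instance_rmdir("foo")
 *
 * The rmdir fails with -EBUSY while the instance's ref count is held,
 * e.g. while a reader keeps its trace_pipe open.
 */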
7977
2b6080f2 7978static void
8434dc93 7979init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 7980{
3dd80953 7981 struct trace_event_file *file;
121aaee7 7982 int cpu;
2b6080f2 7983
607e2ea1
SRRH
7984 trace_create_file("available_tracers", 0444, d_tracer,
7985 tr, &show_traces_fops);
7986
7987 trace_create_file("current_tracer", 0644, d_tracer,
7988 tr, &set_tracer_fops);
7989
ccfe9e42
AL
7990 trace_create_file("tracing_cpumask", 0644, d_tracer,
7991 tr, &tracing_cpumask_fops);
7992
2b6080f2
SR
7993 trace_create_file("trace_options", 0644, d_tracer,
7994 tr, &tracing_iter_fops);
7995
7996 trace_create_file("trace", 0644, d_tracer,
6484c71c 7997 tr, &tracing_fops);
2b6080f2
SR
7998
7999 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 8000 tr, &tracing_pipe_fops);
2b6080f2
SR
8001
8002 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 8003 tr, &tracing_entries_fops);
2b6080f2
SR
8004
8005 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8006 tr, &tracing_total_entries_fops);
8007
238ae93d 8008 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
8009 tr, &tracing_free_buffer_fops);
8010
8011 trace_create_file("trace_marker", 0220, d_tracer,
8012 tr, &tracing_mark_fops);
8013
3dd80953
SRV
8014 file = __find_event_file(tr, "ftrace", "print");
8015 if (file && file->dir)
8016 trace_create_file("trigger", 0644, file->dir, file,
8017 &event_trigger_fops);
8018 tr->trace_marker_file = file;
8019
fa32e855
SR
8020 trace_create_file("trace_marker_raw", 0220, d_tracer,
8021 tr, &tracing_mark_raw_fops);
8022
2b6080f2
SR
8023 trace_create_file("trace_clock", 0644, d_tracer, tr,
8024 &trace_clock_fops);
8025
8026 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 8027 tr, &rb_simple_fops);
ce9bae55 8028
2c1ea60b
TZ
8029 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8030 &trace_time_stamp_mode_fops);
8031
a7b1d74e 8032 tr->buffer_percent = 50;
03329f99
SRV
8033
8034 trace_create_file("buffer_percent", 0444, d_tracer,
8035 tr, &buffer_percent_fops);
8036
16270145
SRRH
8037 create_trace_options_dir(tr);
8038
f971cc9a 8039#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6d9b3fa5
SRRH
8040 trace_create_file("tracing_max_latency", 0644, d_tracer,
8041 &tr->max_latency, &tracing_max_lat_fops);
8042#endif
8043
591dffda
SRRH
8044 if (ftrace_create_function_files(tr, d_tracer))
8045 WARN(1, "Could not allocate function filter files");
8046
ce9bae55
SRRH
8047#ifdef CONFIG_TRACER_SNAPSHOT
8048 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 8049 tr, &snapshot_fops);
ce9bae55 8050#endif
121aaee7
SRRH
8051
8052 for_each_tracing_cpu(cpu)
8434dc93 8053 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 8054
345ddcc8 8055 ftrace_init_tracefs(tr, d_tracer);
2b6080f2
SR
8056}
8057
93faccbb 8058static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
f76180bc
SRRH
8059{
8060 struct vfsmount *mnt;
8061 struct file_system_type *type;
8062
8063 /*
8064 * To maintain backward compatibility for tools that mount
8065 * debugfs to get to the tracing facility, tracefs is automatically
8066 * mounted to the debugfs/tracing directory.
8067 */
8068 type = get_fs_type("tracefs");
8069 if (!type)
8070 return NULL;
93faccbb 8071 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
f76180bc
SRRH
8072 put_filesystem(type);
8073 if (IS_ERR(mnt))
8074 return NULL;
8075 mntget(mnt);
8076
8077 return mnt;
8078}
8079
7eeafbca
SRRH
8080/**
8081 * tracing_init_dentry - initialize top level trace array
8082 *
8083 * This is called when creating files or directories in the tracing
8084 * directory. It is called via fs_initcall() by the boot-up code
8085 * and expects to return the dentry of the top level tracing directory.
8086 */
8087struct dentry *tracing_init_dentry(void)
8088{
8089 struct trace_array *tr = &global_trace;
8090
f76180bc 8091 /* The top level trace array uses NULL as parent */
7eeafbca 8092 if (tr->dir)
f76180bc 8093 return NULL;
7eeafbca 8094
8b129199
JW
8095 if (WARN_ON(!tracefs_initialized()) ||
8096 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8097 WARN_ON(!debugfs_initialized())))
7eeafbca
SRRH
8098 return ERR_PTR(-ENODEV);
8099
f76180bc
SRRH
8100 /*
8101 * As there may still be users that expect the tracing
8102 * files to exist in debugfs/tracing, we must automount
8103 * the tracefs file system there, so older tools still
8104 * work with the newer kernel.
8105 */
8106 tr->dir = debugfs_create_automount("tracing", NULL,
8107 trace_automount, NULL);
7eeafbca
SRRH
8108 if (!tr->dir) {
8109 pr_warn_once("Could not create debugfs directory 'tracing'\n");
8110 return ERR_PTR(-ENOMEM);
8111 }
8112
8434dc93 8113 return NULL;
7eeafbca
SRRH
8114}
8115
00f4b652
JL
8116extern struct trace_eval_map *__start_ftrace_eval_maps[];
8117extern struct trace_eval_map *__stop_ftrace_eval_maps[];
0c564a53 8118
5f60b351 8119static void __init trace_eval_init(void)
0c564a53 8120{
3673b8e4
SRRH
8121 int len;
8122
02fd7f68 8123 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
f57a4143 8124 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
3673b8e4
SRRH
8125}
8126
8127#ifdef CONFIG_MODULES
f57a4143 8128static void trace_module_add_evals(struct module *mod)
3673b8e4 8129{
99be647c 8130 if (!mod->num_trace_evals)
3673b8e4
SRRH
8131 return;
8132
8133 /*
8134 * Modules with bad taint do not have events created; do
8135 * not bother with their eval maps either.
8136 */
8137 if (trace_module_has_bad_taint(mod))
8138 return;
8139
f57a4143 8140 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
3673b8e4
SRRH
8141}
8142
681bec03 8143#ifdef CONFIG_TRACE_EVAL_MAP_FILE
f57a4143 8144static void trace_module_remove_evals(struct module *mod)
9828413d 8145{
23bf8cb8
JL
8146 union trace_eval_map_item *map;
8147 union trace_eval_map_item **last = &trace_eval_maps;
9828413d 8148
99be647c 8149 if (!mod->num_trace_evals)
9828413d
SRRH
8150 return;
8151
1793ed93 8152 mutex_lock(&trace_eval_mutex);
9828413d 8153
23bf8cb8 8154 map = trace_eval_maps;
9828413d
SRRH
8155
8156 while (map) {
8157 if (map->head.mod == mod)
8158 break;
5f60b351 8159 map = trace_eval_jmp_to_tail(map);
9828413d
SRRH
8160 last = &map->tail.next;
8161 map = map->tail.next;
8162 }
8163 if (!map)
8164 goto out;
8165
5f60b351 8166 *last = trace_eval_jmp_to_tail(map)->tail.next;
9828413d
SRRH
8167 kfree(map);
8168 out:
1793ed93 8169 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
8170}
8171#else
f57a4143 8172static inline void trace_module_remove_evals(struct module *mod) { }
681bec03 8173#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 8174
3673b8e4
SRRH
8175static int trace_module_notify(struct notifier_block *self,
8176 unsigned long val, void *data)
8177{
8178 struct module *mod = data;
8179
8180 switch (val) {
8181 case MODULE_STATE_COMING:
f57a4143 8182 trace_module_add_evals(mod);
3673b8e4 8183 break;
9828413d 8184 case MODULE_STATE_GOING:
f57a4143 8185 trace_module_remove_evals(mod);
9828413d 8186 break;
3673b8e4
SRRH
8187 }
8188
8189 return 0;
0c564a53
SRRH
8190}
8191
3673b8e4
SRRH
8192static struct notifier_block trace_module_nb = {
8193 .notifier_call = trace_module_notify,
8194 .priority = 0,
8195};
9828413d 8196#endif /* CONFIG_MODULES */
3673b8e4 8197
8434dc93 8198static __init int tracer_init_tracefs(void)
bc0c38d1
SR
8199{
8200 struct dentry *d_tracer;
bc0c38d1 8201
7e53bd42
LJ
8202 trace_access_lock_init();
8203
bc0c38d1 8204 d_tracer = tracing_init_dentry();
14a5ae40 8205 if (IS_ERR(d_tracer))
ed6f1c99 8206 return 0;
bc0c38d1 8207
58b92547
SRV
8208 event_trace_init();
8209
8434dc93 8210 init_tracer_tracefs(&global_trace, d_tracer);
501c2375 8211 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
bc0c38d1 8212
5452af66 8213 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 8214 &global_trace, &tracing_thresh_fops);
a8259075 8215
339ae5d3 8216 trace_create_file("README", 0444, d_tracer,
5452af66
FW
8217 NULL, &tracing_readme_fops);
8218
69abe6a5
AP
8219 trace_create_file("saved_cmdlines", 0444, d_tracer,
8220 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 8221
939c7a4f
YY
8222 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8223 NULL, &tracing_saved_cmdlines_size_fops);
8224
99c621d7
MS
8225 trace_create_file("saved_tgids", 0444, d_tracer,
8226 NULL, &tracing_saved_tgids_fops);
8227
5f60b351 8228 trace_eval_init();
0c564a53 8229
f57a4143 8230 trace_create_eval_file(d_tracer);
9828413d 8231
3673b8e4
SRRH
8232#ifdef CONFIG_MODULES
8233 register_module_notifier(&trace_module_nb);
8234#endif
8235
bc0c38d1 8236#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
8237 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8238 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 8239#endif
b04cc6b1 8240
277ba044 8241 create_trace_instances(d_tracer);
5452af66 8242
37aea98b 8243 update_tracer_options(&global_trace);
09d23a1d 8244
b5ad384e 8245 return 0;
bc0c38d1
SR
8246}
8247
3f5a54e3
SR
8248static int trace_panic_handler(struct notifier_block *this,
8249 unsigned long event, void *unused)
8250{
944ac425 8251 if (ftrace_dump_on_oops)
cecbca96 8252 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
8253 return NOTIFY_OK;
8254}
8255
8256static struct notifier_block trace_panic_notifier = {
8257 .notifier_call = trace_panic_handler,
8258 .next = NULL,
8259 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8260};
8261
8262static int trace_die_handler(struct notifier_block *self,
8263 unsigned long val,
8264 void *data)
8265{
8266 switch (val) {
8267 case DIE_OOPS:
944ac425 8268 if (ftrace_dump_on_oops)
cecbca96 8269 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
8270 break;
8271 default:
8272 break;
8273 }
8274 return NOTIFY_OK;
8275}
8276
8277static struct notifier_block trace_die_notifier = {
8278 .notifier_call = trace_die_handler,
8279 .priority = 200
8280};
8281
8282/*
8283 * printk is set to a max of 1024; we really don't need it that big.
8284 * Nothing should be printing 1000 characters anyway.
8285 */
8286#define TRACE_MAX_PRINT 1000
8287
8288/*
8289 * Define here KERN_TRACE so that we have one place to modify
8290 * it if we decide to change what log level the ftrace dump
8291 * should be at.
8292 */
428aee14 8293#define KERN_TRACE KERN_EMERG
3f5a54e3 8294
955b61e5 8295void
3f5a54e3
SR
8296trace_printk_seq(struct trace_seq *s)
8297{
8298 /* Probably should print a warning here. */
3a161d99
SRRH
8299 if (s->seq.len >= TRACE_MAX_PRINT)
8300 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 8301
820b75f6
SRRH
8302 /*
8303 * More paranoid code. Although the buffer size is set to
8304 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8305 * an extra layer of protection.
8306 */
8307 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8308 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
8309
8310 /* should be zero ended, but we are paranoid. */
3a161d99 8311 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
8312
8313 printk(KERN_TRACE "%s", s->buffer);
8314
f9520750 8315 trace_seq_init(s);
3f5a54e3
SR
8316}
8317
955b61e5
JW
8318void trace_init_global_iter(struct trace_iterator *iter)
8319{
8320 iter->tr = &global_trace;
2b6080f2 8321 iter->trace = iter->tr->current_trace;
ae3b5093 8322 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 8323 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
8324
8325 if (iter->trace && iter->trace->open)
8326 iter->trace->open(iter);
8327
8328 /* Annotate start of buffers if we had overruns */
8329 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8330 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8331
8332 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8333 if (trace_clocks[iter->tr->clock_id].in_ns)
8334 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
8335}
8336
7fe70b57 8337void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 8338{
3f5a54e3
SR
8339 /* use static because iter can be a bit big for the stack */
8340 static struct trace_iterator iter;
7fe70b57 8341 static atomic_t dump_running;
983f938a 8342 struct trace_array *tr = &global_trace;
cf586b61 8343 unsigned int old_userobj;
d769041f
SR
8344 unsigned long flags;
8345 int cnt = 0, cpu;
3f5a54e3 8346
7fe70b57
SRRH
8347 /* Only allow one dump user at a time. */
8348 if (atomic_inc_return(&dump_running) != 1) {
8349 atomic_dec(&dump_running);
8350 return;
8351 }
3f5a54e3 8352
7fe70b57
SRRH
8353 /*
8354 * Always turn off tracing when we dump.
8355 * We don't need to show trace output of what happens
8356 * between multiple crashes.
8357 *
8358 * If the user does a sysrq-z, then they can re-enable
8359 * tracing with echo 1 > tracing_on.
8360 */
0ee6b6cf 8361 tracing_off();
cf586b61 8362
7fe70b57 8363 local_irq_save(flags);
03fc7f9c 8364 printk_nmi_direct_enter();
3f5a54e3 8365
38dbe0b1 8366 /* Simulate the iterator */
955b61e5
JW
8367 trace_init_global_iter(&iter);
8368
d769041f 8369 for_each_tracing_cpu(cpu) {
5e2d5ef8 8370 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
d769041f
SR
8371 }
8372
983f938a 8373 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
cf586b61 8374
b54d3de9 8375 /* don't look at user memory in panic mode */
983f938a 8376 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
b54d3de9 8377
cecbca96
FW
8378 switch (oops_dump_mode) {
8379 case DUMP_ALL:
ae3b5093 8380 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
8381 break;
8382 case DUMP_ORIG:
8383 iter.cpu_file = raw_smp_processor_id();
8384 break;
8385 case DUMP_NONE:
8386 goto out_enable;
8387 default:
8388 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 8389 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
8390 }
8391
8392 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 8393
7fe70b57
SRRH
8394 /* Did function tracer already get disabled? */
8395 if (ftrace_is_dead()) {
8396 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8397 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8398 }
8399
3f5a54e3
SR
8400 /*
8401 * We need to stop all tracing on all CPUs to read the
8402 * next buffer. This is a bit expensive, but it is
8403 * not done often. We fill in all that we can read,
8404 * and then release the locks again.
8405 */
8406
3f5a54e3
SR
8407 while (!trace_empty(&iter)) {
8408
8409 if (!cnt)
8410 printk(KERN_TRACE "---------------------------------\n");
8411
8412 cnt++;
8413
8414 /* reset all but tr, trace, and overruns */
8415 memset(&iter.seq, 0,
8416 sizeof(struct trace_iterator) -
8417 offsetof(struct trace_iterator, seq));
8418 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8419 iter.pos = -1;
8420
955b61e5 8421 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
8422 int ret;
8423
8424 ret = print_trace_line(&iter);
8425 if (ret != TRACE_TYPE_NO_CONSUME)
8426 trace_consume(&iter);
3f5a54e3 8427 }
b892e5c8 8428 touch_nmi_watchdog();
3f5a54e3
SR
8429
8430 trace_printk_seq(&iter.seq);
8431 }
8432
8433 if (!cnt)
8434 printk(KERN_TRACE " (ftrace buffer empty)\n");
8435 else
8436 printk(KERN_TRACE "---------------------------------\n");
8437
cecbca96 8438 out_enable:
983f938a 8439 tr->trace_flags |= old_userobj;
cf586b61 8440
7fe70b57
SRRH
8441 for_each_tracing_cpu(cpu) {
8442 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 8443 }
03fc7f9c
PM
8444 atomic_dec(&dump_running);
8445 printk_nmi_direct_exit();
cd891ae0 8446 local_irq_restore(flags);
3f5a54e3 8447}
a8eecf22 8448EXPORT_SYMBOL_GPL(ftrace_dump);
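/*
 * Usage sketch (hypothetical caller): as ftrace_dump() is exported,
 * a module may dump the trace buffer to the console on a fatal error:
 *
 *	if (fatal_error)
 *		ftrace_dump(DUMP_ALL);	// or DUMP_ORIG for this CPU only
 *
 * Dumping turns tracing off; it can be re-enabled afterwards via
 * "echo 1 > tracing_on".
 */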
cf586b61 8449
7e465baa
TZ
8450int trace_run_command(const char *buf, int (*createfn)(int, char **))
8451{
8452 char **argv;
8453 int argc, ret;
8454
8455 argc = 0;
8456 ret = 0;
8457 argv = argv_split(GFP_KERNEL, buf, &argc);
8458 if (!argv)
8459 return -ENOMEM;
8460
8461 if (argc)
8462 ret = createfn(argc, argv);
8463
8464 argv_free(argv);
8465
8466 return ret;
8467}
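/*
 * Sketch of a createfn callback (hypothetical, for illustration):
 * trace_run_command() splits buf into an argv[] and hands it over,
 * e.g. for a dynamic-event style syntax "myevent arg1 arg2":
 *
 *	static int create_my_event(int argc, char **argv)
 *	{
 *		if (argc < 1)
 *			return -EINVAL;
 *		// argv[0] is the name, argv[1..] the parameters
 *		return 0;
 *	}
 */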
8468
8469#define WRITE_BUFSIZE 4096
8470
8471ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8472 size_t count, loff_t *ppos,
8473 int (*createfn)(int, char **))
8474{
8475 char *kbuf, *buf, *tmp;
8476 int ret = 0;
8477 size_t done = 0;
8478 size_t size;
8479
8480 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8481 if (!kbuf)
8482 return -ENOMEM;
8483
8484 while (done < count) {
8485 size = count - done;
8486
8487 if (size >= WRITE_BUFSIZE)
8488 size = WRITE_BUFSIZE - 1;
8489
8490 if (copy_from_user(kbuf, buffer + done, size)) {
8491 ret = -EFAULT;
8492 goto out;
8493 }
8494 kbuf[size] = '\0';
8495 buf = kbuf;
8496 do {
8497 tmp = strchr(buf, '\n');
8498 if (tmp) {
8499 *tmp = '\0';
8500 size = tmp - buf + 1;
8501 } else {
8502 size = strlen(buf);
8503 if (done + size < count) {
8504 if (buf != kbuf)
8505 break;
8506 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
8507 pr_warn("Line length is too long: Should be less than %d\n",
8508 WRITE_BUFSIZE - 2);
8509 ret = -EINVAL;
8510 goto out;
8511 }
8512 }
8513 done += size;
8514
8515 /* Remove comments */
8516 tmp = strchr(buf, '#');
8517
8518 if (tmp)
8519 *tmp = '\0';
8520
8521 ret = trace_run_command(buf, createfn);
8522 if (ret)
8523 goto out;
8524 buf += size;
8525
8526 } while (done < count);
8527 }
8528 ret = done;
8529
8530out:
8531 kfree(kbuf);
8532
8533 return ret;
8534}
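/*
 * Illustration: one write may carry several commands. Each newline-
 * terminated line is passed to createfn separately, and anything
 * after '#' on a line is stripped as a comment, e.g. (hypothetical
 * command names):
 *
 *	printf 'cmd_a 1\n# ignored\ncmd_b 2\n' > <control file>
 */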
8535
3928a8a2 8536__init static int tracer_alloc_buffers(void)
bc0c38d1 8537{
73c5162a 8538 int ring_buf_size;
9e01c1b7 8539 int ret = -ENOMEM;
4c11d7ae 8540
b5e87c05
SRRH
8541 /*
8542 * Make sure we don't accidentally add more trace options
8543 * than we have bits for.
8544 */
9a38a885 8545 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
b5e87c05 8546
9e01c1b7
RR
8547 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8548 goto out;
8549
ccfe9e42 8550 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 8551 goto out_free_buffer_mask;
4c11d7ae 8552
07d777fe
SR
8553 /* Only allocate trace_printk buffers if a trace_printk exists */
8554 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 8555 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
8556 trace_printk_init_buffers();
8557
73c5162a
SR
8558 /* To save memory, keep the ring buffer size to its minimum */
8559 if (ring_buffer_expanded)
8560 ring_buf_size = trace_buf_size;
8561 else
8562 ring_buf_size = 1;
8563
9e01c1b7 8564 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 8565 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 8566
2b6080f2
SR
8567 raw_spin_lock_init(&global_trace.start_lock);
8568
b32614c0
SAS
8569 /*
8570 * The prepare callback allocates some memory for the ring buffer. We
8571 * don't free the buffer if the CPU goes down. If we were to free
8572 * the buffer, then the user would lose any trace that was in the
8573 * buffer. The memory will be removed once the "instance" is removed.
8574 */
8575 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8576 "trace/RB:preapre", trace_rb_cpu_prepare,
8577 NULL);
8578 if (ret < 0)
8579 goto out_free_cpumask;
2c4a33ab 8580 /* Used for event triggers */
147d88e0 8581 ret = -ENOMEM;
2c4a33ab
SRRH
8582 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8583 if (!temp_buffer)
b32614c0 8584 goto out_rm_hp_state;
2c4a33ab 8585
939c7a4f
YY
8586 if (trace_create_savedcmd() < 0)
8587 goto out_free_temp_buffer;
8588
9e01c1b7 8589 /* TODO: make the number of buffers hot pluggable with CPUS */
737223fb 8590 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
8591 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8592 WARN_ON(1);
939c7a4f 8593 goto out_free_savedcmd;
4c11d7ae 8594 }
a7603ff4 8595
499e5470
SR
8596 if (global_trace.buffer_disabled)
8597 tracing_off();
4c11d7ae 8598
e1e232ca
SR
8599 if (trace_boot_clock) {
8600 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8601 if (ret < 0)
a395d6a7
JP
8602 pr_warn("Trace clock %s not defined, going back to default\n",
8603 trace_boot_clock);
e1e232ca
SR
8604 }
8605
ca164318
SRRH
8606 /*
8607 * register_tracer() might reference current_trace, so it
8608 * needs to be set before we register anything. This is
8609 * just a bootstrap of current_trace anyway.
8610 */
2b6080f2
SR
8611 global_trace.current_trace = &nop_trace;
8612
0b9b12c1
SRRH
8613 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8614
4104d326
SRRH
8615 ftrace_init_global_array_ops(&global_trace);
8616
9a38a885
SRRH
8617 init_trace_flags_index(&global_trace);
8618
ca164318
SRRH
8619 register_tracer(&nop_trace);
8620
dbeafd0d
SRV
8621 /* Function tracing may start here (via kernel command line) */
8622 init_function_trace();
8623
60a11774
SR
8624 /* All seems OK, enable tracing */
8625 tracing_disabled = 0;
3928a8a2 8626
3f5a54e3
SR
8627 atomic_notifier_chain_register(&panic_notifier_list,
8628 &trace_panic_notifier);
8629
8630 register_die_notifier(&trace_die_notifier);
2fc1dfbe 8631
ae63b31e
SR
8632 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8633
8634 INIT_LIST_HEAD(&global_trace.systems);
8635 INIT_LIST_HEAD(&global_trace.events);
067fe038 8636 INIT_LIST_HEAD(&global_trace.hist_vars);
ae63b31e
SR
8637 list_add(&global_trace.list, &ftrace_trace_arrays);
8638
a4d1e688 8639 apply_trace_boot_options();
7bcfaf54 8640
77fd5c15
SRRH
8641 register_snapshot_cmd();
8642
2fc1dfbe 8643 return 0;
3f5a54e3 8644
939c7a4f
YY
8645out_free_savedcmd:
8646 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
8647out_free_temp_buffer:
8648 ring_buffer_free(temp_buffer);
b32614c0
SAS
8649out_rm_hp_state:
8650 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9e01c1b7 8651out_free_cpumask:
ccfe9e42 8652 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
8653out_free_buffer_mask:
8654 free_cpumask_var(tracing_buffer_mask);
8655out:
8656 return ret;
bc0c38d1 8657}
b2821ae6 8658
e725c731 8659void __init early_trace_init(void)
5f893b26 8660{
0daa2302
SRRH
8661 if (tracepoint_printk) {
8662 tracepoint_print_iter =
8663 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8664 if (WARN_ON(!tracepoint_print_iter))
8665 tracepoint_printk = 0;
42391745
SRRH
8666 else
8667 static_key_enable(&tracepoint_printk_key.key);
0daa2302 8668 }
5f893b26 8669 tracer_alloc_buffers();
e725c731
SRV
8670}
8671
8672void __init trace_init(void)
8673{
0c564a53 8674 trace_event_init();
5f893b26
SRRH
8675}
8676
b2821ae6
SR
8677__init static int clear_boot_tracer(void)
8678{
8679 /*
8680 * The default bootup tracer name lives in an init section.
8681 * This function is called at late_initcall time. If we did not
8682 * find the boot tracer, then clear it out, to prevent
8683 * later registration from accessing the buffer that is
8684 * about to be freed.
8685 */
8686 if (!default_bootup_tracer)
8687 return 0;
8688
8689 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8690 default_bootup_tracer);
8691 default_bootup_tracer = NULL;
8692
8693 return 0;
8694}
8695
8434dc93 8696fs_initcall(tracer_init_tracefs);
4bb0f0e7 8697late_initcall_sync(clear_boot_tracer);
3fd49c9e
CW
8698
8699#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
8700__init static int tracing_set_default_clock(void)
8701{
8702 /* sched_clock_stable() is determined in late_initcall */
5125eee4 8703 if (!trace_boot_clock && !sched_clock_stable()) {
3fd49c9e
CW
8704 printk(KERN_WARNING
8705 "Unstable clock detected, switching default tracing clock to \"global\"\n"
8706 "If you want to keep using the local clock, then add:\n"
8707 " \"trace_clock=local\"\n"
8708 "on the kernel command line\n");
8709 tracing_set_clock(&global_trace, "global");
8710 }
8711
8712 return 0;
8713}
8714late_initcall_sync(tracing_set_default_clock);
8715#endif