kernel/trace/trace.c
bc0c38d1
SR
1/*
2 * ring buffer based function tracer
3 *
2b6080f2 4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
bc0c38d1
SR
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
6d49e352 12 * Copyright (C) 2004 Nadia Yvette Chambers
bc0c38d1 13 */
2cadf913 14#include <linux/ring_buffer.h>
273b281f 15#include <generated/utsrelease.h>
2cadf913
SR
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
bc0c38d1
SR
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
3f5a54e3 20#include <linux/notifier.h>
2cadf913 21#include <linux/irqflags.h>
bc0c38d1 22#include <linux/debugfs.h>
8434dc93 23#include <linux/tracefs.h>
4c11d7ae 24#include <linux/pagemap.h>
bc0c38d1
SR
25#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
76c813e2 28#include <linux/vmalloc.h>
bc0c38d1
SR
29#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
2cadf913 32#include <linux/splice.h>
3f5a54e3 33#include <linux/kdebug.h>
5f0c6c03 34#include <linux/string.h>
f76180bc 35#include <linux/mount.h>
7e53bd42 36#include <linux/rwsem.h>
5a0e3ad6 37#include <linux/slab.h>
bc0c38d1
SR
38#include <linux/ctype.h>
39#include <linux/init.h>
2a2cc8f7 40#include <linux/poll.h>
b892e5c8 41#include <linux/nmi.h>
bc0c38d1 42#include <linux/fs.h>
478409dd 43#include <linux/trace.h>
8bd75c77 44#include <linux/sched/rt.h>
86387f7e 45
bc0c38d1 46#include "trace.h"
f0868d1e 47#include "trace_output.h"
bc0c38d1 48
73c5162a
SR
49/*
50 * On boot up, the ring buffer is set to the minimum size, so that
51 * we do not waste memory on systems that are not using tracing.
52 */
55034cd6 53bool ring_buffer_expanded;
73c5162a 54
8e1b82e0
FW
55/*
56 * We need to change this state when a selftest is running.
ff32504f
FW
57 * A selftest will lurk into the ring-buffer to count the
58 * entries inserted during the selftest although some concurrent
5e1607a0 59 * insertions into the ring-buffer such as trace_printk could occur
ff32504f
FW
60 * at the same time, giving false positive or negative results.
61 */
8e1b82e0 62static bool __read_mostly tracing_selftest_running;
ff32504f 63
b2821ae6
SR
64/*
65 * If a tracer is running, we do not want to run SELFTEST.
66 */
020e5f85 67bool __read_mostly tracing_selftest_disabled;
b2821ae6 68
0daa2302
SRRH
69/* Pipe tracepoints to printk */
70struct trace_iterator *tracepoint_print_iter;
71int tracepoint_printk;
42391745 72static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
0daa2302 73
adf9f195
FW
74/* For tracers that don't implement custom flags */
75static struct tracer_opt dummy_tracer_opt[] = {
76 { }
77};
78
8c1a49ae
SRRH
79static int
80dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
adf9f195
FW
81{
82 return 0;
83}
0f048701 84
7ffbd48d
SR
85/*
86 * To prevent the comm cache from being overwritten when no
87 * tracing is active, only save the comm when a trace event
88 * occurred.
89 */
d914ba37 90static DEFINE_PER_CPU(bool, trace_taskinfo_save);
7ffbd48d 91
0f048701
SR
92/*
93 * Kill all tracing for good (never come back).
 94 * It is initialized to 1 and turns to zero only when the initialization
 95 * of the tracer succeeds. That is the only place that sets
 96 * it back to zero.
97 */
4fd27358 98static int tracing_disabled = 1;
0f048701 99
955b61e5 100cpumask_var_t __read_mostly tracing_buffer_mask;
ab46428c 101
944ac425
SR
102/*
103 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
104 *
105 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
106 * is set, then ftrace_dump is called. This will output the contents
107 * of the ftrace buffers to the console. This is very useful for
 108 * capturing traces that lead to crashes and outputting them to a
109 * serial console.
110 *
 111 * It is off by default, but you can enable it either by specifying
 112 * "ftrace_dump_on_oops" on the kernel command line, or by setting
cecbca96
FW
113 * /proc/sys/kernel/ftrace_dump_on_oops
114 * Set 1 if you want to dump buffers of all CPUs
115 * Set 2 if you want to dump the buffer of the CPU that triggered oops
944ac425 116 */
cecbca96
FW
117
118enum ftrace_dump_mode ftrace_dump_on_oops;
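/*
 * Illustrative usage only (not part of this file's logic): the option is
 * normally enabled from the boot command line or via procfs, e.g.
 *
 *	ftrace_dump_on_oops=orig_cpu	(on the kernel command line)
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	(at run time)
 *
 * The "=orig_cpu" form is parsed by set_ftrace_dump_on_oops() below.
 */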
944ac425 119
de7edd31
SRRH
120/* When set, tracing will stop when a WARN*() is hit */
121int __disable_trace_on_warning;
122
681bec03
JL
123#ifdef CONFIG_TRACE_EVAL_MAP_FILE
124/* Map of enums to their values, for "eval_map" file */
23bf8cb8 125struct trace_eval_map_head {
9828413d
SRRH
126 struct module *mod;
127 unsigned long length;
128};
129
23bf8cb8 130union trace_eval_map_item;
9828413d 131
23bf8cb8 132struct trace_eval_map_tail {
9828413d
SRRH
133 /*
134 * "end" is first and points to NULL as it must be different
00f4b652 135 * than "mod" or "eval_string"
9828413d 136 */
23bf8cb8 137 union trace_eval_map_item *next;
9828413d
SRRH
138 const char *end; /* points to NULL */
139};
140
1793ed93 141static DEFINE_MUTEX(trace_eval_mutex);
9828413d
SRRH
142
143/*
23bf8cb8 144 * The trace_eval_maps are saved in an array with two extra elements,
9828413d
SRRH
145 * one at the beginning, and one at the end. The beginning item contains
146 * the count of the saved maps (head.length), and the module they
147 * belong to if not built in (head.mod). The ending item contains a
681bec03 148 * pointer to the next array of saved eval_map items.
9828413d 149 */
23bf8cb8 150union trace_eval_map_item {
00f4b652 151 struct trace_eval_map map;
23bf8cb8
JL
152 struct trace_eval_map_head head;
153 struct trace_eval_map_tail tail;
9828413d
SRRH
154};
155
23bf8cb8 156static union trace_eval_map_item *trace_eval_maps;
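/*
 * A minimal sketch (illustrative only, not code used here) of walking the
 * saved chunks while holding trace_eval_mutex; each chunk is laid out as
 * [head][map 0 .. map len-1][tail]:
 *
 *	union trace_eval_map_item *ptr = trace_eval_maps;
 *
 *	while (ptr) {
 *		unsigned long i, len = ptr[0].head.length;
 *
 *		for (i = 1; i <= len; i++)
 *			pr_info("%s = %lu\n", ptr[i].map.eval_string,
 *				ptr[i].map.eval_value);
 *		ptr = ptr[len + 1].tail.next;
 *	}
 */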
681bec03 157#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 158
607e2ea1 159static int tracing_set_tracer(struct trace_array *tr, const char *buf);
b2821ae6 160
ee6c2c1b
LZ
161#define MAX_TRACER_SIZE 100
162static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
b2821ae6 163static char *default_bootup_tracer;
d9e54076 164
55034cd6
SRRH
165static bool allocate_snapshot;
166
1beee96b 167static int __init set_cmdline_ftrace(char *str)
d9e54076 168{
67012ab1 169 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
b2821ae6 170 default_bootup_tracer = bootup_tracer_buf;
73c5162a 171 /* We are using ftrace early, expand it */
55034cd6 172 ring_buffer_expanded = true;
d9e54076
PZ
173 return 1;
174}
1beee96b 175__setup("ftrace=", set_cmdline_ftrace);
d9e54076 176
944ac425
SR
177static int __init set_ftrace_dump_on_oops(char *str)
178{
cecbca96
FW
179 if (*str++ != '=' || !*str) {
180 ftrace_dump_on_oops = DUMP_ALL;
181 return 1;
182 }
183
184 if (!strcmp("orig_cpu", str)) {
185 ftrace_dump_on_oops = DUMP_ORIG;
186 return 1;
187 }
188
189 return 0;
944ac425
SR
190}
191__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
60a11774 192
de7edd31
SRRH
193static int __init stop_trace_on_warning(char *str)
194{
933ff9f2
LCG
195 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
196 __disable_trace_on_warning = 1;
de7edd31
SRRH
197 return 1;
198}
933ff9f2 199__setup("traceoff_on_warning", stop_trace_on_warning);
de7edd31 200
3209cff4 201static int __init boot_alloc_snapshot(char *str)
55034cd6
SRRH
202{
203 allocate_snapshot = true;
204 /* We also need the main ring buffer expanded */
205 ring_buffer_expanded = true;
206 return 1;
207}
3209cff4 208__setup("alloc_snapshot", boot_alloc_snapshot);
55034cd6 209
7bcfaf54
SR
210
211static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
7bcfaf54
SR
212
213static int __init set_trace_boot_options(char *str)
214{
67012ab1 215 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
7bcfaf54
SR
216 return 0;
217}
218__setup("trace_options=", set_trace_boot_options);
219
e1e232ca
SR
220static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
221static char *trace_boot_clock __initdata;
222
223static int __init set_trace_boot_clock(char *str)
224{
225 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
226 trace_boot_clock = trace_boot_clock_buf;
227 return 0;
228}
229__setup("trace_clock=", set_trace_boot_clock);
230
0daa2302
SRRH
231static int __init set_tracepoint_printk(char *str)
232{
233 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
234 tracepoint_printk = 1;
235 return 1;
236}
237__setup("tp_printk", set_tracepoint_printk);
de7edd31 238
a5a1d1c2 239unsigned long long ns2usecs(u64 nsec)
bc0c38d1
SR
240{
241 nsec += 500;
242 do_div(nsec, 1000);
243 return nsec;
244}
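/* For example, ns2usecs(1500) returns 2: the +500 rounds to the nearest microsecond. */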
245
983f938a
SRRH
246/* trace_flags holds trace_options default values */
247#define TRACE_DEFAULT_FLAGS \
248 (FUNCTION_DEFAULT_FLAGS | \
249 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
250 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
251 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
252 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
253
16270145
SRRH
254/* trace_options that are only supported by global_trace */
255#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
256 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
257
20550622
SRRH
258/* trace_flags that are default zero for instances */
259#define ZEROED_TRACE_FLAGS \
1e10486f 260 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
16270145 261
4fcdae83 262/*
67d04bb2
JF
263 * The global_trace is the descriptor that holds the top-level tracing
264 * buffers for the live tracing.
4fcdae83 265 */
983f938a
SRRH
266static struct trace_array global_trace = {
267 .trace_flags = TRACE_DEFAULT_FLAGS,
268};
bc0c38d1 269
ae63b31e 270LIST_HEAD(ftrace_trace_arrays);
bc0c38d1 271
ff451961
SRRH
272int trace_array_get(struct trace_array *this_tr)
273{
274 struct trace_array *tr;
275 int ret = -ENODEV;
276
277 mutex_lock(&trace_types_lock);
278 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
279 if (tr == this_tr) {
280 tr->ref++;
281 ret = 0;
282 break;
283 }
284 }
285 mutex_unlock(&trace_types_lock);
286
287 return ret;
288}
289
290static void __trace_array_put(struct trace_array *this_tr)
291{
292 WARN_ON(!this_tr->ref);
293 this_tr->ref--;
294}
295
296void trace_array_put(struct trace_array *this_tr)
297{
298 mutex_lock(&trace_types_lock);
299 __trace_array_put(this_tr);
300 mutex_unlock(&trace_types_lock);
301}
302
2425bcb9 303int call_filter_check_discard(struct trace_event_call *call, void *rec,
f306cc82
TZ
304 struct ring_buffer *buffer,
305 struct ring_buffer_event *event)
306{
307 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
308 !filter_match_preds(call->filter, rec)) {
0fc1b09f 309 __trace_event_discard_commit(buffer, event);
f306cc82
TZ
310 return 1;
311 }
312
313 return 0;
eb02ce01
TZ
314}
315
76c813e2
SRRH
316void trace_free_pid_list(struct trace_pid_list *pid_list)
317{
318 vfree(pid_list->pids);
319 kfree(pid_list);
320}
321
d8275c45
SR
322/**
323 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
324 * @filtered_pids: The list of pids to check
325 * @search_pid: The PID to find in @filtered_pids
326 *
 327 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
328 */
329bool
330trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
331{
332 /*
333 * If pid_max changed after filtered_pids was created, we
334 * by default ignore all pids greater than the previous pid_max.
335 */
336 if (search_pid >= filtered_pids->pid_max)
337 return false;
338
339 return test_bit(search_pid, filtered_pids->pids);
340}
341
342/**
343 * trace_ignore_this_task - should a task be ignored for tracing
344 * @filtered_pids: The list of pids to check
345 * @task: The task that should be ignored if not filtered
346 *
347 * Checks if @task should be traced or not from @filtered_pids.
348 * Returns true if @task should *NOT* be traced.
349 * Returns false if @task should be traced.
350 */
351bool
352trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
353{
354 /*
355 * Return false, because if filtered_pids does not exist,
356 * all pids are good to trace.
357 */
358 if (!filtered_pids)
359 return false;
360
361 return !trace_find_filtered_pid(filtered_pids, task->pid);
362}
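/*
 * Illustrative call pattern (a sketch, not a definition used here): callers
 * such as the event pid filter fetch the list under RCU and skip recording
 * when the current task is filtered out, roughly:
 *
 *	pid_list = rcu_dereference_sched(tr->filtered_pids);
 *	if (trace_ignore_this_task(pid_list, current))
 *		return;
 */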
363
364/**
5a93bae2 365 * trace_filter_add_remove_task - Add or remove a task from a pid_list
d8275c45
SR
366 * @pid_list: The list to modify
367 * @self: The current task for fork or NULL for exit
368 * @task: The task to add or remove
369 *
370 * If adding a task, if @self is defined, the task is only added if @self
371 * is also included in @pid_list. This happens on fork and tasks should
372 * only be added when the parent is listed. If @self is NULL, then the
373 * @task pid will be removed from the list, which would happen on exit
374 * of a task.
375 */
376void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
377 struct task_struct *self,
378 struct task_struct *task)
379{
380 if (!pid_list)
381 return;
382
383 /* For forks, we only add if the forking task is listed */
384 if (self) {
385 if (!trace_find_filtered_pid(pid_list, self->pid))
386 return;
387 }
388
389 /* Sorry, but we don't support pid_max changing after setting */
390 if (task->pid >= pid_list->pid_max)
391 return;
392
393 /* "self" is set for forks, and NULL for exits */
394 if (self)
395 set_bit(task->pid, pid_list->pids);
396 else
397 clear_bit(task->pid, pid_list->pids);
398}
399
5cc8976b
SRRH
400/**
401 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
402 * @pid_list: The pid list to show
403 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
404 * @pos: The position of the file
405 *
406 * This is used by the seq_file "next" operation to iterate the pids
407 * listed in a trace_pid_list structure.
408 *
409 * Returns the pid+1 as we want to display pid of zero, but NULL would
410 * stop the iteration.
411 */
412void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
413{
414 unsigned long pid = (unsigned long)v;
415
416 (*pos)++;
417
 418 /* pid is already +1 of the actual previous bit */
419 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
420
421 /* Return pid + 1 to allow zero to be represented */
422 if (pid < pid_list->pid_max)
423 return (void *)(pid + 1);
424
425 return NULL;
426}
427
428/**
429 * trace_pid_start - Used for seq_file to start reading pid lists
430 * @pid_list: The pid list to show
431 * @pos: The position of the file
432 *
433 * This is used by seq_file "start" operation to start the iteration
434 * of listing pids.
435 *
436 * Returns the pid+1 as we want to display pid of zero, but NULL would
437 * stop the iteration.
438 */
439void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
440{
441 unsigned long pid;
442 loff_t l = 0;
443
444 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
445 if (pid >= pid_list->pid_max)
446 return NULL;
447
448 /* Return pid + 1 so that zero can be the exit value */
449 for (pid++; pid && l < *pos;
450 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
451 ;
452 return (void *)pid;
453}
454
455/**
456 * trace_pid_show - show the current pid in seq_file processing
457 * @m: The seq_file structure to write into
458 * @v: A void pointer of the pid (+1) value to display
459 *
460 * Can be directly used by seq_file operations to display the current
461 * pid value.
462 */
463int trace_pid_show(struct seq_file *m, void *v)
464{
465 unsigned long pid = (unsigned long)v - 1;
466
467 seq_printf(m, "%lu\n", pid);
468 return 0;
469}
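/*
 * The three helpers above are meant to back a seq_file interface. A rough
 * sketch of the wiring (the my_* names are hypothetical; real users supply
 * their own start/next/stop wrappers around these helpers):
 *
 *	static const struct seq_operations pid_seq_ops = {
 *		.start	= my_pid_seq_start,	(calls trace_pid_start())
 *		.next	= my_pid_seq_next,	(calls trace_pid_next())
 *		.stop	= my_pid_seq_stop,
 *		.show	= trace_pid_show,
 *	};
 */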
470
76c813e2
SRRH
471/* 128 should be much more than enough */
472#define PID_BUF_SIZE 127
473
474int trace_pid_write(struct trace_pid_list *filtered_pids,
475 struct trace_pid_list **new_pid_list,
476 const char __user *ubuf, size_t cnt)
477{
478 struct trace_pid_list *pid_list;
479 struct trace_parser parser;
480 unsigned long val;
481 int nr_pids = 0;
482 ssize_t read = 0;
483 ssize_t ret = 0;
484 loff_t pos;
485 pid_t pid;
486
487 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
488 return -ENOMEM;
489
490 /*
 491 * Always recreate a new array: the write is an all-or-nothing
 492 * operation, so adding new pids from the user always builds a fresh
 493 * list. If the operation fails, the current list is
 494 * not modified.
495 */
496 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
497 if (!pid_list)
498 return -ENOMEM;
499
500 pid_list->pid_max = READ_ONCE(pid_max);
501
502 /* Only truncating will shrink pid_max */
503 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
504 pid_list->pid_max = filtered_pids->pid_max;
505
506 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
507 if (!pid_list->pids) {
508 kfree(pid_list);
509 return -ENOMEM;
510 }
511
512 if (filtered_pids) {
513 /* copy the current bits to the new max */
67f20b08
WY
514 for_each_set_bit(pid, filtered_pids->pids,
515 filtered_pids->pid_max) {
76c813e2 516 set_bit(pid, pid_list->pids);
76c813e2
SRRH
517 nr_pids++;
518 }
519 }
520
521 while (cnt > 0) {
522
523 pos = 0;
524
525 ret = trace_get_user(&parser, ubuf, cnt, &pos);
526 if (ret < 0 || !trace_parser_loaded(&parser))
527 break;
528
529 read += ret;
530 ubuf += ret;
531 cnt -= ret;
532
533 parser.buffer[parser.idx] = 0;
534
535 ret = -EINVAL;
536 if (kstrtoul(parser.buffer, 0, &val))
537 break;
538 if (val >= pid_list->pid_max)
539 break;
540
541 pid = (pid_t)val;
542
543 set_bit(pid, pid_list->pids);
544 nr_pids++;
545
546 trace_parser_clear(&parser);
547 ret = 0;
548 }
549 trace_parser_put(&parser);
550
551 if (ret < 0) {
552 trace_free_pid_list(pid_list);
553 return ret;
554 }
555
556 if (!nr_pids) {
557 /* Cleared the list of pids */
558 trace_free_pid_list(pid_list);
559 read = ret;
560 pid_list = NULL;
561 }
562
563 *new_pid_list = pid_list;
564
565 return read;
566}
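/*
 * Example of the accepted input (illustrative): writing space or newline
 * separated pids such as "123 456" builds a new list containing those pids,
 * while writing only whitespace clears the list. Files such as
 * set_event_pid use this helper to parse their input.
 */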
567
a5a1d1c2 568static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
37886f6a
SR
569{
570 u64 ts;
571
572 /* Early boot up does not have a buffer yet */
9457158b 573 if (!buf->buffer)
37886f6a
SR
574 return trace_clock_local();
575
9457158b
AL
576 ts = ring_buffer_time_stamp(buf->buffer, cpu);
577 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
37886f6a
SR
578
579 return ts;
580}
bc0c38d1 581
a5a1d1c2 582u64 ftrace_now(int cpu)
9457158b
AL
583{
584 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
585}
586
10246fa3
SRRH
587/**
 588 * tracing_is_enabled - Show if global_trace has been enabled
589 *
590 * Shows if the global trace has been enabled or not. It uses the
591 * mirror flag "buffer_disabled" to be used in fast paths such as for
592 * the irqsoff tracer. But it may be inaccurate due to races. If you
593 * need to know the accurate state, use tracing_is_on() which is a little
594 * slower, but accurate.
595 */
9036990d
SR
596int tracing_is_enabled(void)
597{
10246fa3
SRRH
598 /*
599 * For quick access (irqsoff uses this in fast path), just
600 * return the mirror variable of the state of the ring buffer.
601 * It's a little racy, but we don't really care.
602 */
603 smp_rmb();
604 return !global_trace.buffer_disabled;
9036990d
SR
605}
606
4fcdae83 607/*
3928a8a2
SR
608 * trace_buf_size is the size in bytes that is allocated
609 * for a buffer. Note, the number of bytes is always rounded
610 * to page size.
3f5a54e3
SR
611 *
612 * This number is purposely set to a low number of 16384.
 613 * If a dump on oops happens, it will be much appreciated
 614 * not to have to wait for all that output. Anyway, this can be
 615 * configured at both boot time and run time.
4fcdae83 616 */
3928a8a2 617#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
3f5a54e3 618
3928a8a2 619static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
bc0c38d1 620
4fcdae83 621/* trace_types holds a link list of available tracers. */
bc0c38d1 622static struct tracer *trace_types __read_mostly;
4fcdae83 623
4fcdae83
SR
624/*
625 * trace_types_lock is used to protect the trace_types list.
4fcdae83 626 */
a8227415 627DEFINE_MUTEX(trace_types_lock);
4fcdae83 628
7e53bd42
LJ
629/*
630 * serialize the access of the ring buffer
631 *
 632 * The ring buffer serializes readers, but that is only low level protection.
 633 * The validity of the events (returned by ring_buffer_peek(), etc.)
 634 * is not protected by the ring buffer.
 635 *
 636 * The content of events may become garbage if we allow other processes to consume
 637 * these events concurrently:
 638 * A) the page of the consumed events may become a normal page
 639 * (not a reader page) in the ring buffer, and this page will be rewritten
 640 * by the events producer.
 641 * B) The page of the consumed events may become a page for splice_read,
 642 * and this page will be returned to the system.
 643 *
 644 * These primitives allow multi-process access to different cpu ring buffers
 645 * concurrently.
 646 *
 647 * These primitives don't distinguish read-only and read-consume access.
 648 * Multiple read-only accesses are also serialized.
649 */
650
651#ifdef CONFIG_SMP
652static DECLARE_RWSEM(all_cpu_access_lock);
653static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
654
655static inline void trace_access_lock(int cpu)
656{
ae3b5093 657 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
658 /* gain it for accessing the whole ring buffer. */
659 down_write(&all_cpu_access_lock);
660 } else {
661 /* gain it for accessing a cpu ring buffer. */
662
ae3b5093 663 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
7e53bd42
LJ
664 down_read(&all_cpu_access_lock);
665
666 /* Secondly block other access to this @cpu ring buffer. */
667 mutex_lock(&per_cpu(cpu_access_lock, cpu));
668 }
669}
670
671static inline void trace_access_unlock(int cpu)
672{
ae3b5093 673 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
674 up_write(&all_cpu_access_lock);
675 } else {
676 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
677 up_read(&all_cpu_access_lock);
678 }
679}
680
681static inline void trace_access_lock_init(void)
682{
683 int cpu;
684
685 for_each_possible_cpu(cpu)
686 mutex_init(&per_cpu(cpu_access_lock, cpu));
687}
688
689#else
690
691static DEFINE_MUTEX(access_lock);
692
693static inline void trace_access_lock(int cpu)
694{
695 (void)cpu;
696 mutex_lock(&access_lock);
697}
698
699static inline void trace_access_unlock(int cpu)
700{
701 (void)cpu;
702 mutex_unlock(&access_lock);
703}
704
705static inline void trace_access_lock_init(void)
706{
707}
708
709#endif
710
d78a4614
SRRH
711#ifdef CONFIG_STACKTRACE
712static void __ftrace_trace_stack(struct ring_buffer *buffer,
713 unsigned long flags,
714 int skip, int pc, struct pt_regs *regs);
2d34f489
SRRH
715static inline void ftrace_trace_stack(struct trace_array *tr,
716 struct ring_buffer *buffer,
73dddbb5
SRRH
717 unsigned long flags,
718 int skip, int pc, struct pt_regs *regs);
ca475e83 719
d78a4614
SRRH
720#else
721static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
722 unsigned long flags,
723 int skip, int pc, struct pt_regs *regs)
724{
725}
2d34f489
SRRH
726static inline void ftrace_trace_stack(struct trace_array *tr,
727 struct ring_buffer *buffer,
73dddbb5
SRRH
728 unsigned long flags,
729 int skip, int pc, struct pt_regs *regs)
ca475e83
SRRH
730{
731}
732
d78a4614
SRRH
733#endif
734
3e9a8aad
SRRH
735static __always_inline void
736trace_event_setup(struct ring_buffer_event *event,
737 int type, unsigned long flags, int pc)
738{
739 struct trace_entry *ent = ring_buffer_event_data(event);
740
741 tracing_generic_entry_update(ent, flags, pc);
742 ent->type = type;
743}
744
745static __always_inline struct ring_buffer_event *
746__trace_buffer_lock_reserve(struct ring_buffer *buffer,
747 int type,
748 unsigned long len,
749 unsigned long flags, int pc)
750{
751 struct ring_buffer_event *event;
752
753 event = ring_buffer_lock_reserve(buffer, len);
754 if (event != NULL)
755 trace_event_setup(event, type, flags, pc);
756
757 return event;
758}
759
2290f2c5 760void tracer_tracing_on(struct trace_array *tr)
10246fa3
SRRH
761{
762 if (tr->trace_buffer.buffer)
763 ring_buffer_record_on(tr->trace_buffer.buffer);
764 /*
765 * This flag is looked at when buffers haven't been allocated
766 * yet, or by some tracers (like irqsoff), that just want to
767 * know if the ring buffer has been disabled, but it can handle
768 * races of where it gets disabled but we still do a record.
769 * As the check is in the fast path of the tracers, it is more
770 * important to be fast than accurate.
771 */
772 tr->buffer_disabled = 0;
773 /* Make the flag seen by readers */
774 smp_wmb();
775}
776
499e5470
SR
777/**
778 * tracing_on - enable tracing buffers
779 *
780 * This function enables tracing buffers that may have been
781 * disabled with tracing_off.
782 */
783void tracing_on(void)
784{
10246fa3 785 tracer_tracing_on(&global_trace);
499e5470
SR
786}
787EXPORT_SYMBOL_GPL(tracing_on);
788
52ffabe3
SRRH
789
790static __always_inline void
791__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
792{
d914ba37 793 __this_cpu_write(trace_taskinfo_save, true);
52ffabe3
SRRH
794
795 /* If this is the temp buffer, we need to commit fully */
796 if (this_cpu_read(trace_buffered_event) == event) {
797 /* Length is in event->array[0] */
798 ring_buffer_write(buffer, event->array[0], &event->array[1]);
799 /* Release the temp buffer */
800 this_cpu_dec(trace_buffered_event_cnt);
801 } else
802 ring_buffer_unlock_commit(buffer, event);
803}
804
09ae7234
SRRH
805/**
806 * __trace_puts - write a constant string into the trace buffer.
807 * @ip: The address of the caller
808 * @str: The constant string to write
809 * @size: The size of the string.
810 */
811int __trace_puts(unsigned long ip, const char *str, int size)
812{
813 struct ring_buffer_event *event;
814 struct ring_buffer *buffer;
815 struct print_entry *entry;
816 unsigned long irq_flags;
817 int alloc;
8abfb872
J
818 int pc;
819
983f938a 820 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
821 return 0;
822
8abfb872 823 pc = preempt_count();
09ae7234 824
3132e107
SRRH
825 if (unlikely(tracing_selftest_running || tracing_disabled))
826 return 0;
827
09ae7234
SRRH
828 alloc = sizeof(*entry) + size + 2; /* possible \n added */
829
830 local_save_flags(irq_flags);
831 buffer = global_trace.trace_buffer.buffer;
3e9a8aad
SRRH
832 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
833 irq_flags, pc);
09ae7234
SRRH
834 if (!event)
835 return 0;
836
837 entry = ring_buffer_event_data(event);
838 entry->ip = ip;
839
840 memcpy(&entry->buf, str, size);
841
842 /* Add a newline if necessary */
843 if (entry->buf[size - 1] != '\n') {
844 entry->buf[size] = '\n';
845 entry->buf[size + 1] = '\0';
846 } else
847 entry->buf[size] = '\0';
848
849 __buffer_unlock_commit(buffer, event);
2d34f489 850 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
851
852 return size;
853}
854EXPORT_SYMBOL_GPL(__trace_puts);
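/*
 * Callers normally do not use __trace_puts() directly; the trace_puts()
 * macro in <linux/kernel.h> expands to __trace_bputs() or __trace_puts()
 * depending on whether the string is a build-time constant, e.g.:
 *
 *	trace_puts("reached the slow path\n");
 */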
855
856/**
857 * __trace_bputs - write the pointer to a constant string into trace buffer
858 * @ip: The address of the caller
859 * @str: The constant string to write to the buffer to
860 */
861int __trace_bputs(unsigned long ip, const char *str)
862{
863 struct ring_buffer_event *event;
864 struct ring_buffer *buffer;
865 struct bputs_entry *entry;
866 unsigned long irq_flags;
867 int size = sizeof(struct bputs_entry);
8abfb872
J
868 int pc;
869
983f938a 870 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
871 return 0;
872
8abfb872 873 pc = preempt_count();
09ae7234 874
3132e107
SRRH
875 if (unlikely(tracing_selftest_running || tracing_disabled))
876 return 0;
877
09ae7234
SRRH
878 local_save_flags(irq_flags);
879 buffer = global_trace.trace_buffer.buffer;
3e9a8aad
SRRH
880 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
881 irq_flags, pc);
09ae7234
SRRH
882 if (!event)
883 return 0;
884
885 entry = ring_buffer_event_data(event);
886 entry->ip = ip;
887 entry->str = str;
888
889 __buffer_unlock_commit(buffer, event);
2d34f489 890 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
891
892 return 1;
893}
894EXPORT_SYMBOL_GPL(__trace_bputs);
895
ad909e21 896#ifdef CONFIG_TRACER_SNAPSHOT
9ccd9a81 897void tracing_snapshot_instance(struct trace_array *tr)
ad909e21 898{
ad909e21
SRRH
899 struct tracer *tracer = tr->current_trace;
900 unsigned long flags;
901
1b22e382
SRRH
902 if (in_nmi()) {
903 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
904 internal_trace_puts("*** snapshot is being ignored ***\n");
905 return;
906 }
907
ad909e21 908 if (!tr->allocated_snapshot) {
ca268da6
SRRH
909 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
910 internal_trace_puts("*** stopping trace here! ***\n");
ad909e21
SRRH
911 tracing_off();
912 return;
913 }
914
915 /* Note, snapshot can not be used when the tracer uses it */
916 if (tracer->use_max_tr) {
ca268da6
SRRH
917 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
918 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
ad909e21
SRRH
919 return;
920 }
921
922 local_irq_save(flags);
923 update_max_tr(tr, current, smp_processor_id());
924 local_irq_restore(flags);
925}
cab50379
SRV
926
927/**
5a93bae2 928 * tracing_snapshot - take a snapshot of the current buffer.
cab50379
SRV
929 *
930 * This causes a swap between the snapshot buffer and the current live
931 * tracing buffer. You can use this to take snapshots of the live
932 * trace when some condition is triggered, but continue to trace.
933 *
934 * Note, make sure to allocate the snapshot with either
935 * a tracing_snapshot_alloc(), or by doing it manually
936 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
937 *
938 * If the snapshot buffer is not allocated, it will stop tracing.
939 * Basically making a permanent snapshot.
940 */
941void tracing_snapshot(void)
942{
943 struct trace_array *tr = &global_trace;
944
945 tracing_snapshot_instance(tr);
946}
1b22e382 947EXPORT_SYMBOL_GPL(tracing_snapshot);
ad909e21
SRRH
948
949static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
950 struct trace_buffer *size_buf, int cpu_id);
3209cff4
SRRH
951static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
952
9ccd9a81 953int tracing_alloc_snapshot_instance(struct trace_array *tr)
3209cff4
SRRH
954{
955 int ret;
956
957 if (!tr->allocated_snapshot) {
958
959 /* allocate spare buffer */
960 ret = resize_buffer_duplicate_size(&tr->max_buffer,
961 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
962 if (ret < 0)
963 return ret;
964
965 tr->allocated_snapshot = true;
966 }
967
968 return 0;
969}
970
ad1438a0 971static void free_snapshot(struct trace_array *tr)
3209cff4
SRRH
972{
973 /*
 974 * We don't free the ring buffer; instead, we resize it because
 975 * the max_tr ring buffer has some state (e.g. ring->clock) and
 976 * we want to preserve it.
977 */
978 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
979 set_buffer_entries(&tr->max_buffer, 1);
980 tracing_reset_online_cpus(&tr->max_buffer);
981 tr->allocated_snapshot = false;
982}
ad909e21 983
93e31ffb
TZ
984/**
985 * tracing_alloc_snapshot - allocate snapshot buffer.
986 *
987 * This only allocates the snapshot buffer if it isn't already
988 * allocated - it doesn't also take a snapshot.
989 *
990 * This is meant to be used in cases where the snapshot buffer needs
991 * to be set up for events that can't sleep but need to be able to
992 * trigger a snapshot.
993 */
994int tracing_alloc_snapshot(void)
995{
996 struct trace_array *tr = &global_trace;
997 int ret;
998
9ccd9a81 999 ret = tracing_alloc_snapshot_instance(tr);
93e31ffb
TZ
1000 WARN_ON(ret < 0);
1001
1002 return ret;
1003}
1004EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1005
ad909e21 1006/**
5a93bae2 1007 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
ad909e21 1008 *
5a93bae2 1009 * This is similar to tracing_snapshot(), but it will allocate the
ad909e21
SRRH
1010 * snapshot buffer if it isn't already allocated. Use this only
1011 * where it is safe to sleep, as the allocation may sleep.
1012 *
1013 * This causes a swap between the snapshot buffer and the current live
1014 * tracing buffer. You can use this to take snapshots of the live
1015 * trace when some condition is triggered, but continue to trace.
1016 */
1017void tracing_snapshot_alloc(void)
1018{
ad909e21
SRRH
1019 int ret;
1020
93e31ffb
TZ
1021 ret = tracing_alloc_snapshot();
1022 if (ret < 0)
3209cff4 1023 return;
ad909e21
SRRH
1024
1025 tracing_snapshot();
1026}
1b22e382 1027EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
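/*
 * Typical in-kernel usage, as a sketch (the condition is hypothetical):
 * allocate the spare buffer once from a context that may sleep, then
 * trigger snapshots wherever the interesting condition is hit. Note that
 * tracing_snapshot() itself does not sleep (and NMI context is rejected
 * above):
 *
 *	tracing_snapshot_alloc();		(e.g. during driver init)
 *	...
 *	if (something_went_wrong)
 *		tracing_snapshot();
 */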
ad909e21
SRRH
1028#else
1029void tracing_snapshot(void)
1030{
1031 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1032}
1b22e382 1033EXPORT_SYMBOL_GPL(tracing_snapshot);
93e31ffb
TZ
1034int tracing_alloc_snapshot(void)
1035{
1036 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1037 return -ENODEV;
1038}
1039EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
ad909e21
SRRH
1040void tracing_snapshot_alloc(void)
1041{
1042 /* Give warning */
1043 tracing_snapshot();
1044}
1b22e382 1045EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
1046#endif /* CONFIG_TRACER_SNAPSHOT */
1047
2290f2c5 1048void tracer_tracing_off(struct trace_array *tr)
10246fa3
SRRH
1049{
1050 if (tr->trace_buffer.buffer)
1051 ring_buffer_record_off(tr->trace_buffer.buffer);
1052 /*
1053 * This flag is looked at when buffers haven't been allocated
1054 * yet, or by some tracers (like irqsoff), that just want to
1055 * know if the ring buffer has been disabled, but it can handle
1056 * races of where it gets disabled but we still do a record.
1057 * As the check is in the fast path of the tracers, it is more
1058 * important to be fast than accurate.
1059 */
1060 tr->buffer_disabled = 1;
1061 /* Make the flag seen by readers */
1062 smp_wmb();
1063}
1064
499e5470
SR
1065/**
1066 * tracing_off - turn off tracing buffers
1067 *
1068 * This function stops the tracing buffers from recording data.
1069 * It does not disable any overhead the tracers themselves may
1070 * be causing. This function simply causes all recording to
1071 * the ring buffers to fail.
1072 */
1073void tracing_off(void)
1074{
10246fa3 1075 tracer_tracing_off(&global_trace);
499e5470
SR
1076}
1077EXPORT_SYMBOL_GPL(tracing_off);
1078
de7edd31
SRRH
1079void disable_trace_on_warning(void)
1080{
1081 if (__disable_trace_on_warning)
1082 tracing_off();
1083}
1084
10246fa3
SRRH
1085/**
1086 * tracer_tracing_is_on - show real state of ring buffer enabled
1087 * @tr : the trace array to know if ring buffer is enabled
1088 *
1089 * Shows real state of the ring buffer if it is enabled or not.
1090 */
e7c15cd8 1091int tracer_tracing_is_on(struct trace_array *tr)
10246fa3
SRRH
1092{
1093 if (tr->trace_buffer.buffer)
1094 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1095 return !tr->buffer_disabled;
1096}
1097
499e5470
SR
1098/**
1099 * tracing_is_on - show state of ring buffers enabled
1100 */
1101int tracing_is_on(void)
1102{
10246fa3 1103 return tracer_tracing_is_on(&global_trace);
499e5470
SR
1104}
1105EXPORT_SYMBOL_GPL(tracing_is_on);
1106
3928a8a2 1107static int __init set_buf_size(char *str)
bc0c38d1 1108{
3928a8a2 1109 unsigned long buf_size;
c6caeeb1 1110
bc0c38d1
SR
1111 if (!str)
1112 return 0;
9d612bef 1113 buf_size = memparse(str, &str);
c6caeeb1 1114 /* nr_entries can not be zero */
9d612bef 1115 if (buf_size == 0)
c6caeeb1 1116 return 0;
3928a8a2 1117 trace_buf_size = buf_size;
bc0c38d1
SR
1118 return 1;
1119}
3928a8a2 1120__setup("trace_buf_size=", set_buf_size);
bc0c38d1 1121
0e950173
TB
1122static int __init set_tracing_thresh(char *str)
1123{
87abb3b1 1124 unsigned long threshold;
0e950173
TB
1125 int ret;
1126
1127 if (!str)
1128 return 0;
bcd83ea6 1129 ret = kstrtoul(str, 0, &threshold);
0e950173
TB
1130 if (ret < 0)
1131 return 0;
87abb3b1 1132 tracing_thresh = threshold * 1000;
0e950173
TB
1133 return 1;
1134}
1135__setup("tracing_thresh=", set_tracing_thresh);
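/*
 * Example (illustrative): booting with "tracing_thresh=100" stores 100000
 * in tracing_thresh, i.e. a 100 microsecond threshold kept internally in
 * nanoseconds.
 */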
1136
57f50be1
SR
1137unsigned long nsecs_to_usecs(unsigned long nsecs)
1138{
1139 return nsecs / 1000;
1140}
1141
a3418a36
SRRH
1142/*
1143 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
f57a4143 1144 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
a3418a36 1145 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
f57a4143 1146 * of strings in the order that the evals (enum) were defined.
a3418a36
SRRH
1147 */
1148#undef C
1149#define C(a, b) b
1150
4fcdae83 1151/* These must match the bit positions in trace_iterator_flags */
bc0c38d1 1152static const char *trace_options[] = {
a3418a36 1153 TRACE_FLAGS
bc0c38d1
SR
1154 NULL
1155};
1156
5079f326
Z
1157static struct {
1158 u64 (*func)(void);
1159 const char *name;
8be0709f 1160 int in_ns; /* is this clock in nanoseconds? */
5079f326 1161} trace_clocks[] = {
1b3e5c09
TG
1162 { trace_clock_local, "local", 1 },
1163 { trace_clock_global, "global", 1 },
1164 { trace_clock_counter, "counter", 0 },
e7fda6c4 1165 { trace_clock_jiffies, "uptime", 0 },
1b3e5c09
TG
1166 { trace_clock, "perf", 1 },
1167 { ktime_get_mono_fast_ns, "mono", 1 },
aabfa5f2 1168 { ktime_get_raw_fast_ns, "mono_raw", 1 },
80ec3552 1169 { ktime_get_boot_fast_ns, "boot", 1 },
8cbd9cc6 1170 ARCH_TRACE_CLOCKS
5079f326
Z
1171};
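/*
 * The active clock can be selected with the "trace_clock=" boot option
 * handled earlier in this file, or at run time via tracefs, e.g.
 * (illustrative):
 *
 *	echo mono > /sys/kernel/debug/tracing/trace_clock
 *
 * "local" is the default; clocks with in_ns == 0 ("counter", "uptime")
 * do not report nanoseconds.
 */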
1172
b63f39ea 1173/*
1174 * trace_parser_get_init - gets the buffer for trace parser
1175 */
1176int trace_parser_get_init(struct trace_parser *parser, int size)
1177{
1178 memset(parser, 0, sizeof(*parser));
1179
1180 parser->buffer = kmalloc(size, GFP_KERNEL);
1181 if (!parser->buffer)
1182 return 1;
1183
1184 parser->size = size;
1185 return 0;
1186}
1187
1188/*
1189 * trace_parser_put - frees the buffer for trace parser
1190 */
1191void trace_parser_put(struct trace_parser *parser)
1192{
1193 kfree(parser->buffer);
0e684b65 1194 parser->buffer = NULL;
b63f39ea 1195}
1196
1197/*
1198 * trace_get_user - reads the user input string separated by space
1199 * (matched by isspace(ch))
1200 *
1201 * For each string found the 'struct trace_parser' is updated,
1202 * and the function returns.
1203 *
1204 * Returns number of bytes read.
1205 *
1206 * See kernel/trace/trace.h for 'struct trace_parser' details.
1207 */
1208int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1209 size_t cnt, loff_t *ppos)
1210{
1211 char ch;
1212 size_t read = 0;
1213 ssize_t ret;
1214
1215 if (!*ppos)
1216 trace_parser_clear(parser);
1217
1218 ret = get_user(ch, ubuf++);
1219 if (ret)
1220 goto out;
1221
1222 read++;
1223 cnt--;
1224
1225 /*
1226 * The parser is not finished with the last write,
1227 * continue reading the user input without skipping spaces.
1228 */
1229 if (!parser->cont) {
1230 /* skip white space */
1231 while (cnt && isspace(ch)) {
1232 ret = get_user(ch, ubuf++);
1233 if (ret)
1234 goto out;
1235 read++;
1236 cnt--;
1237 }
1238
1239 /* only spaces were written */
1240 if (isspace(ch)) {
1241 *ppos += read;
1242 ret = read;
1243 goto out;
1244 }
1245
1246 parser->idx = 0;
1247 }
1248
1249 /* read the non-space input */
1250 while (cnt && !isspace(ch)) {
3c235a33 1251 if (parser->idx < parser->size - 1)
b63f39ea 1252 parser->buffer[parser->idx++] = ch;
1253 else {
1254 ret = -EINVAL;
1255 goto out;
1256 }
1257 ret = get_user(ch, ubuf++);
1258 if (ret)
1259 goto out;
1260 read++;
1261 cnt--;
1262 }
1263
1264 /* We either got finished input or we have to wait for another call. */
1265 if (isspace(ch)) {
1266 parser->buffer[parser->idx] = 0;
1267 parser->cont = false;
057db848 1268 } else if (parser->idx < parser->size - 1) {
b63f39ea 1269 parser->cont = true;
1270 parser->buffer[parser->idx++] = ch;
057db848
SR
1271 } else {
1272 ret = -EINVAL;
1273 goto out;
b63f39ea 1274 }
1275
1276 *ppos += read;
1277 ret = read;
1278
1279out:
1280 return ret;
1281}
1282
3a161d99 1283/* TODO add a seq_buf_to_buffer() */
b8b94265 1284static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
3c56819b
EGM
1285{
1286 int len;
3c56819b 1287
5ac48378 1288 if (trace_seq_used(s) <= s->seq.readpos)
3c56819b
EGM
1289 return -EBUSY;
1290
5ac48378 1291 len = trace_seq_used(s) - s->seq.readpos;
3c56819b
EGM
1292 if (cnt > len)
1293 cnt = len;
3a161d99 1294 memcpy(buf, s->buffer + s->seq.readpos, cnt);
3c56819b 1295
3a161d99 1296 s->seq.readpos += cnt;
3c56819b
EGM
1297 return cnt;
1298}
1299
0e950173
TB
1300unsigned long __read_mostly tracing_thresh;
1301
5d4a9dba 1302#ifdef CONFIG_TRACER_MAX_TRACE
5d4a9dba
SR
1303/*
1304 * Copy the new maximum trace into the separate maximum-trace
1305 * structure. (this way the maximum trace is permanently saved,
5a93bae2 1306 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
5d4a9dba
SR
1307 */
1308static void
1309__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1310{
12883efb
SRRH
1311 struct trace_buffer *trace_buf = &tr->trace_buffer;
1312 struct trace_buffer *max_buf = &tr->max_buffer;
1313 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1314 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
5d4a9dba 1315
12883efb
SRRH
1316 max_buf->cpu = cpu;
1317 max_buf->time_start = data->preempt_timestamp;
5d4a9dba 1318
6d9b3fa5 1319 max_data->saved_latency = tr->max_latency;
8248ac05
SR
1320 max_data->critical_start = data->critical_start;
1321 max_data->critical_end = data->critical_end;
5d4a9dba 1322
1acaa1b2 1323 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
8248ac05 1324 max_data->pid = tsk->pid;
f17a5194
SRRH
1325 /*
1326 * If tsk == current, then use current_uid(), as that does not use
1327 * RCU. The irq tracer can be called out of RCU scope.
1328 */
1329 if (tsk == current)
1330 max_data->uid = current_uid();
1331 else
1332 max_data->uid = task_uid(tsk);
1333
8248ac05
SR
1334 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1335 max_data->policy = tsk->policy;
1336 max_data->rt_priority = tsk->rt_priority;
5d4a9dba
SR
1337
1338 /* record this tasks comm */
1339 tracing_record_cmdline(tsk);
1340}
1341
4fcdae83
SR
1342/**
1343 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1344 * @tr: tracer
1345 * @tsk: the task with the latency
1346 * @cpu: The cpu that initiated the trace.
1347 *
1348 * Flip the buffers between the @tr and the max_tr and record information
1349 * about which task was the cause of this latency.
1350 */
e309b41d 1351void
bc0c38d1
SR
1352update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1353{
2721e72d 1354 struct ring_buffer *buf;
bc0c38d1 1355
2b6080f2 1356 if (tr->stop_count)
b8de7bd1
SR
1357 return;
1358
4c11d7ae 1359 WARN_ON_ONCE(!irqs_disabled());
34600f0e 1360
45ad21ca 1361 if (!tr->allocated_snapshot) {
debdd57f 1362 /* Only the nop tracer should hit this when disabling */
2b6080f2 1363 WARN_ON_ONCE(tr->current_trace != &nop_trace);
34600f0e 1364 return;
debdd57f 1365 }
34600f0e 1366
0b9b12c1 1367 arch_spin_lock(&tr->max_lock);
3928a8a2 1368
1f1cb23b
MH
1369 /* Inherit the recordable setting from trace_buffer */
1370 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1371 ring_buffer_record_on(tr->max_buffer.buffer);
1372 else
1373 ring_buffer_record_off(tr->max_buffer.buffer);
1374
12883efb
SRRH
1375 buf = tr->trace_buffer.buffer;
1376 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1377 tr->max_buffer.buffer = buf;
3928a8a2 1378
bc0c38d1 1379 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1380 arch_spin_unlock(&tr->max_lock);
bc0c38d1
SR
1381}
1382
1383/**
1384 * update_max_tr_single - only copy one trace over, and reset the rest
1385 * @tr - tracer
1386 * @tsk - task with the latency
1387 * @cpu - the cpu of the buffer to copy.
4fcdae83
SR
1388 *
1389 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
bc0c38d1 1390 */
e309b41d 1391void
bc0c38d1
SR
1392update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1393{
3928a8a2 1394 int ret;
bc0c38d1 1395
2b6080f2 1396 if (tr->stop_count)
b8de7bd1
SR
1397 return;
1398
4c11d7ae 1399 WARN_ON_ONCE(!irqs_disabled());
6c24499f 1400 if (!tr->allocated_snapshot) {
2930e04d 1401 /* Only the nop tracer should hit this when disabling */
9e8529af 1402 WARN_ON_ONCE(tr->current_trace != &nop_trace);
ef710e10 1403 return;
2930e04d 1404 }
ef710e10 1405
0b9b12c1 1406 arch_spin_lock(&tr->max_lock);
bc0c38d1 1407
12883efb 1408 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
3928a8a2 1409
e8165dbb
SR
1410 if (ret == -EBUSY) {
1411 /*
1412 * We failed to swap the buffer due to a commit taking
1413 * place on this CPU. We fail to record, but we reset
1414 * the max trace buffer (no one writes directly to it)
1415 * and flag that it failed.
1416 */
12883efb 1417 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
e8165dbb
SR
1418 "Failed to swap buffers due to commit in progress\n");
1419 }
1420
e8165dbb 1421 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
bc0c38d1
SR
1422
1423 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1424 arch_spin_unlock(&tr->max_lock);
bc0c38d1 1425}
5d4a9dba 1426#endif /* CONFIG_TRACER_MAX_TRACE */
bc0c38d1 1427
e30f53aa 1428static int wait_on_pipe(struct trace_iterator *iter, bool full)
0d5c6e1c 1429{
15693458
SRRH
1430 /* Iterators are static, they should be filled or empty */
1431 if (trace_buffer_iter(iter, iter->cpu_file))
8b8b3683 1432 return 0;
0d5c6e1c 1433
e30f53aa
RV
1434 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1435 full);
0d5c6e1c
SR
1436}
1437
f4e781c0 1438#ifdef CONFIG_FTRACE_STARTUP_TEST
9afecfbb
SRV
1439static bool selftests_can_run;
1440
1441struct trace_selftests {
1442 struct list_head list;
1443 struct tracer *type;
1444};
1445
1446static LIST_HEAD(postponed_selftests);
1447
1448static int save_selftest(struct tracer *type)
1449{
1450 struct trace_selftests *selftest;
1451
1452 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1453 if (!selftest)
1454 return -ENOMEM;
1455
1456 selftest->type = type;
1457 list_add(&selftest->list, &postponed_selftests);
1458 return 0;
1459}
1460
f4e781c0
SRRH
1461static int run_tracer_selftest(struct tracer *type)
1462{
1463 struct trace_array *tr = &global_trace;
1464 struct tracer *saved_tracer = tr->current_trace;
1465 int ret;
0d5c6e1c 1466
f4e781c0
SRRH
1467 if (!type->selftest || tracing_selftest_disabled)
1468 return 0;
0d5c6e1c 1469
9afecfbb
SRV
1470 /*
1471 * If a tracer registers early in boot up (before scheduling is
1472 * initialized and such), then do not run its selftests yet.
 1473 * Instead, run them a little later in the boot process.
1474 */
1475 if (!selftests_can_run)
1476 return save_selftest(type);
1477
0d5c6e1c 1478 /*
f4e781c0
SRRH
1479 * Run a selftest on this tracer.
1480 * Here we reset the trace buffer, and set the current
1481 * tracer to be this tracer. The tracer can then run some
1482 * internal tracing to verify that everything is in order.
1483 * If we fail, we do not register this tracer.
0d5c6e1c 1484 */
f4e781c0 1485 tracing_reset_online_cpus(&tr->trace_buffer);
0d5c6e1c 1486
f4e781c0
SRRH
1487 tr->current_trace = type;
1488
1489#ifdef CONFIG_TRACER_MAX_TRACE
1490 if (type->use_max_tr) {
1491 /* If we expanded the buffers, make sure the max is expanded too */
1492 if (ring_buffer_expanded)
1493 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1494 RING_BUFFER_ALL_CPUS);
1495 tr->allocated_snapshot = true;
1496 }
1497#endif
1498
1499 /* the test is responsible for initializing and enabling */
1500 pr_info("Testing tracer %s: ", type->name);
1501 ret = type->selftest(type, tr);
1502 /* the test is responsible for resetting too */
1503 tr->current_trace = saved_tracer;
1504 if (ret) {
1505 printk(KERN_CONT "FAILED!\n");
1506 /* Add the warning after printing 'FAILED' */
1507 WARN_ON(1);
1508 return -1;
1509 }
1510 /* Only reset on passing, to avoid touching corrupted buffers */
1511 tracing_reset_online_cpus(&tr->trace_buffer);
1512
1513#ifdef CONFIG_TRACER_MAX_TRACE
1514 if (type->use_max_tr) {
1515 tr->allocated_snapshot = false;
0d5c6e1c 1516
f4e781c0
SRRH
1517 /* Shrink the max buffer again */
1518 if (ring_buffer_expanded)
1519 ring_buffer_resize(tr->max_buffer.buffer, 1,
1520 RING_BUFFER_ALL_CPUS);
1521 }
1522#endif
1523
1524 printk(KERN_CONT "PASSED\n");
1525 return 0;
1526}
9afecfbb
SRV
1527
1528static __init int init_trace_selftests(void)
1529{
1530 struct trace_selftests *p, *n;
1531 struct tracer *t, **last;
1532 int ret;
1533
1534 selftests_can_run = true;
1535
1536 mutex_lock(&trace_types_lock);
1537
1538 if (list_empty(&postponed_selftests))
1539 goto out;
1540
1541 pr_info("Running postponed tracer tests:\n");
1542
1543 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1544 ret = run_tracer_selftest(p->type);
1545 /* If the test fails, then warn and remove from available_tracers */
1546 if (ret < 0) {
1547 WARN(1, "tracer: %s failed selftest, disabling\n",
1548 p->type->name);
1549 last = &trace_types;
1550 for (t = trace_types; t; t = t->next) {
1551 if (t == p->type) {
1552 *last = t->next;
1553 break;
1554 }
1555 last = &t->next;
1556 }
1557 }
1558 list_del(&p->list);
1559 kfree(p);
1560 }
1561
1562 out:
1563 mutex_unlock(&trace_types_lock);
1564
1565 return 0;
1566}
b9ef0326 1567core_initcall(init_trace_selftests);
f4e781c0
SRRH
1568#else
1569static inline int run_tracer_selftest(struct tracer *type)
1570{
1571 return 0;
0d5c6e1c 1572}
f4e781c0 1573#endif /* CONFIG_FTRACE_STARTUP_TEST */
0d5c6e1c 1574
41d9c0be
SRRH
1575static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1576
a4d1e688
JW
1577static void __init apply_trace_boot_options(void);
1578
4fcdae83
SR
1579/**
1580 * register_tracer - register a tracer with the ftrace system.
1581 * @type - the plugin for the tracer
1582 *
1583 * Register a new plugin tracer.
1584 */
a4d1e688 1585int __init register_tracer(struct tracer *type)
bc0c38d1
SR
1586{
1587 struct tracer *t;
bc0c38d1
SR
1588 int ret = 0;
1589
1590 if (!type->name) {
1591 pr_info("Tracer must have a name\n");
1592 return -1;
1593 }
1594
24a461d5 1595 if (strlen(type->name) >= MAX_TRACER_SIZE) {
ee6c2c1b
LZ
1596 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1597 return -1;
1598 }
1599
bc0c38d1 1600 mutex_lock(&trace_types_lock);
86fa2f60 1601
8e1b82e0
FW
1602 tracing_selftest_running = true;
1603
bc0c38d1
SR
1604 for (t = trace_types; t; t = t->next) {
1605 if (strcmp(type->name, t->name) == 0) {
1606 /* already found */
ee6c2c1b 1607 pr_info("Tracer %s already registered\n",
bc0c38d1
SR
1608 type->name);
1609 ret = -1;
1610 goto out;
1611 }
1612 }
1613
adf9f195
FW
1614 if (!type->set_flag)
1615 type->set_flag = &dummy_set_flag;
d39cdd20
CH
1616 if (!type->flags) {
1617 /*allocate a dummy tracer_flags*/
1618 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
c8ca003b
CH
1619 if (!type->flags) {
1620 ret = -ENOMEM;
1621 goto out;
1622 }
d39cdd20
CH
1623 type->flags->val = 0;
1624 type->flags->opts = dummy_tracer_opt;
1625 } else
adf9f195
FW
1626 if (!type->flags->opts)
1627 type->flags->opts = dummy_tracer_opt;
6eaaa5d5 1628
d39cdd20
CH
1629 /* store the tracer for __set_tracer_option */
1630 type->flags->trace = type;
1631
f4e781c0
SRRH
1632 ret = run_tracer_selftest(type);
1633 if (ret < 0)
1634 goto out;
60a11774 1635
bc0c38d1
SR
1636 type->next = trace_types;
1637 trace_types = type;
41d9c0be 1638 add_tracer_options(&global_trace, type);
60a11774 1639
bc0c38d1 1640 out:
8e1b82e0 1641 tracing_selftest_running = false;
bc0c38d1
SR
1642 mutex_unlock(&trace_types_lock);
1643
dac74940
SR
1644 if (ret || !default_bootup_tracer)
1645 goto out_unlock;
1646
ee6c2c1b 1647 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
dac74940
SR
1648 goto out_unlock;
1649
1650 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1651 /* Do we want this tracer to start on bootup? */
607e2ea1 1652 tracing_set_tracer(&global_trace, type->name);
dac74940 1653 default_bootup_tracer = NULL;
a4d1e688
JW
1654
1655 apply_trace_boot_options();
1656
dac74940 1657 /* disable other selftests, since this will break it. */
55034cd6 1658 tracing_selftest_disabled = true;
b2821ae6 1659#ifdef CONFIG_FTRACE_STARTUP_TEST
dac74940
SR
1660 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1661 type->name);
b2821ae6 1662#endif
b2821ae6 1663
dac74940 1664 out_unlock:
bc0c38d1
SR
1665 return ret;
1666}
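/*
 * A minimal registration sketch (the "example" names are hypothetical;
 * real tracers live in kernel/trace/trace_*.c and register from an init
 * call, since register_tracer() is __init):
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,
 *		.reset	= example_tracer_reset,
 *	};
 *
 *	static __init int init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(init_example_tracer);
 */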
1667
12883efb 1668void tracing_reset(struct trace_buffer *buf, int cpu)
f633903a 1669{
12883efb 1670 struct ring_buffer *buffer = buf->buffer;
f633903a 1671
a5416411
HT
1672 if (!buffer)
1673 return;
1674
f633903a
SR
1675 ring_buffer_record_disable(buffer);
1676
1677 /* Make sure all commits have finished */
1678 synchronize_sched();
68179686 1679 ring_buffer_reset_cpu(buffer, cpu);
f633903a
SR
1680
1681 ring_buffer_record_enable(buffer);
1682}
1683
12883efb 1684void tracing_reset_online_cpus(struct trace_buffer *buf)
213cc060 1685{
12883efb 1686 struct ring_buffer *buffer = buf->buffer;
213cc060
PE
1687 int cpu;
1688
a5416411
HT
1689 if (!buffer)
1690 return;
1691
621968cd
SR
1692 ring_buffer_record_disable(buffer);
1693
1694 /* Make sure all commits have finished */
1695 synchronize_sched();
1696
9457158b 1697 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
213cc060
PE
1698
1699 for_each_online_cpu(cpu)
68179686 1700 ring_buffer_reset_cpu(buffer, cpu);
621968cd
SR
1701
1702 ring_buffer_record_enable(buffer);
213cc060
PE
1703}
1704
09d8091c 1705/* Must have trace_types_lock held */
873c642f 1706void tracing_reset_all_online_cpus(void)
9456f0fa 1707{
873c642f
SRRH
1708 struct trace_array *tr;
1709
873c642f 1710 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
065e63f9
SRV
1711 if (!tr->clear_trace)
1712 continue;
1713 tr->clear_trace = false;
12883efb
SRRH
1714 tracing_reset_online_cpus(&tr->trace_buffer);
1715#ifdef CONFIG_TRACER_MAX_TRACE
1716 tracing_reset_online_cpus(&tr->max_buffer);
1717#endif
873c642f 1718 }
9456f0fa
SR
1719}
1720
d914ba37
JF
1721static int *tgid_map;
1722
939c7a4f 1723#define SAVED_CMDLINES_DEFAULT 128
2c7eea4c 1724#define NO_CMDLINE_MAP UINT_MAX
edc35bd7 1725static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
939c7a4f
YY
1726struct saved_cmdlines_buffer {
1727 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1728 unsigned *map_cmdline_to_pid;
1729 unsigned cmdline_num;
1730 int cmdline_idx;
1731 char *saved_cmdlines;
1732};
1733static struct saved_cmdlines_buffer *savedcmd;
25b0b44a 1734
25b0b44a 1735/* temporary disable recording */
d914ba37 1736static atomic_t trace_record_taskinfo_disabled __read_mostly;
bc0c38d1 1737
939c7a4f
YY
1738static inline char *get_saved_cmdlines(int idx)
1739{
1740 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1741}
1742
1743static inline void set_cmdline(int idx, const char *cmdline)
bc0c38d1 1744{
939c7a4f
YY
1745 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1746}
1747
1748static int allocate_cmdlines_buffer(unsigned int val,
1749 struct saved_cmdlines_buffer *s)
1750{
1751 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1752 GFP_KERNEL);
1753 if (!s->map_cmdline_to_pid)
1754 return -ENOMEM;
1755
1756 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1757 if (!s->saved_cmdlines) {
1758 kfree(s->map_cmdline_to_pid);
1759 return -ENOMEM;
1760 }
1761
1762 s->cmdline_idx = 0;
1763 s->cmdline_num = val;
1764 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1765 sizeof(s->map_pid_to_cmdline));
1766 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1767 val * sizeof(*s->map_cmdline_to_pid));
1768
1769 return 0;
1770}
1771
1772static int trace_create_savedcmd(void)
1773{
1774 int ret;
1775
a6af8fbf 1776 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
939c7a4f
YY
1777 if (!savedcmd)
1778 return -ENOMEM;
1779
1780 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1781 if (ret < 0) {
1782 kfree(savedcmd);
1783 savedcmd = NULL;
1784 return -ENOMEM;
1785 }
1786
1787 return 0;
bc0c38d1
SR
1788}
1789
b5130b1e
CE
1790int is_tracing_stopped(void)
1791{
2b6080f2 1792 return global_trace.stop_count;
b5130b1e
CE
1793}
1794
0f048701
SR
1795/**
1796 * tracing_start - quick start of the tracer
1797 *
1798 * If tracing is enabled but was stopped by tracing_stop,
1799 * this will start the tracer back up.
1800 */
1801void tracing_start(void)
1802{
1803 struct ring_buffer *buffer;
1804 unsigned long flags;
1805
1806 if (tracing_disabled)
1807 return;
1808
2b6080f2
SR
1809 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1810 if (--global_trace.stop_count) {
1811 if (global_trace.stop_count < 0) {
b06a8301
SR
1812 /* Someone screwed up their debugging */
1813 WARN_ON_ONCE(1);
2b6080f2 1814 global_trace.stop_count = 0;
b06a8301 1815 }
0f048701
SR
1816 goto out;
1817 }
1818
a2f80714 1819 /* Prevent the buffers from switching */
0b9b12c1 1820 arch_spin_lock(&global_trace.max_lock);
0f048701 1821
12883efb 1822 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1823 if (buffer)
1824 ring_buffer_record_enable(buffer);
1825
12883efb
SRRH
1826#ifdef CONFIG_TRACER_MAX_TRACE
1827 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1828 if (buffer)
1829 ring_buffer_record_enable(buffer);
12883efb 1830#endif
0f048701 1831
0b9b12c1 1832 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1833
0f048701 1834 out:
2b6080f2
SR
1835 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1836}
1837
1838static void tracing_start_tr(struct trace_array *tr)
1839{
1840 struct ring_buffer *buffer;
1841 unsigned long flags;
1842
1843 if (tracing_disabled)
1844 return;
1845
1846 /* If global, we need to also start the max tracer */
1847 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1848 return tracing_start();
1849
1850 raw_spin_lock_irqsave(&tr->start_lock, flags);
1851
1852 if (--tr->stop_count) {
1853 if (tr->stop_count < 0) {
1854 /* Someone screwed up their debugging */
1855 WARN_ON_ONCE(1);
1856 tr->stop_count = 0;
1857 }
1858 goto out;
1859 }
1860
12883efb 1861 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1862 if (buffer)
1863 ring_buffer_record_enable(buffer);
1864
1865 out:
1866 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1867}
1868
1869/**
1870 * tracing_stop - quick stop of the tracer
1871 *
1872 * Lightweight way to stop tracing. Use in conjunction with
1873 * tracing_start.
1874 */
1875void tracing_stop(void)
1876{
1877 struct ring_buffer *buffer;
1878 unsigned long flags;
1879
2b6080f2
SR
1880 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1881 if (global_trace.stop_count++)
0f048701
SR
1882 goto out;
1883
a2f80714 1884 /* Prevent the buffers from switching */
0b9b12c1 1885 arch_spin_lock(&global_trace.max_lock);
a2f80714 1886
12883efb 1887 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1888 if (buffer)
1889 ring_buffer_record_disable(buffer);
1890
12883efb
SRRH
1891#ifdef CONFIG_TRACER_MAX_TRACE
1892 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1893 if (buffer)
1894 ring_buffer_record_disable(buffer);
12883efb 1895#endif
0f048701 1896
0b9b12c1 1897 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1898
0f048701 1899 out:
2b6080f2
SR
1900 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1901}
1902
1903static void tracing_stop_tr(struct trace_array *tr)
1904{
1905 struct ring_buffer *buffer;
1906 unsigned long flags;
1907
1908 /* If global, we need to also stop the max tracer */
1909 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1910 return tracing_stop();
1911
1912 raw_spin_lock_irqsave(&tr->start_lock, flags);
1913 if (tr->stop_count++)
1914 goto out;
1915
12883efb 1916 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1917 if (buffer)
1918 ring_buffer_record_disable(buffer);
1919
1920 out:
1921 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1922}
1923
379cfdac 1924static int trace_save_cmdline(struct task_struct *tsk)
bc0c38d1 1925{
a635cf04 1926 unsigned pid, idx;
bc0c38d1 1927
eaf260ac
JF
1928 /* treat recording of idle task as a success */
1929 if (!tsk->pid)
1930 return 1;
1931
1932 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
379cfdac 1933 return 0;
bc0c38d1
SR
1934
1935 /*
1936 * It's not the end of the world if we don't get
1937 * the lock, but we also don't want to spin
1938 * nor do we want to disable interrupts,
1939 * so if we miss here, then better luck next time.
1940 */
0199c4e6 1941 if (!arch_spin_trylock(&trace_cmdline_lock))
379cfdac 1942 return 0;
bc0c38d1 1943
939c7a4f 1944 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2c7eea4c 1945 if (idx == NO_CMDLINE_MAP) {
939c7a4f 1946 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
bc0c38d1 1947
a635cf04
CE
1948 /*
1949 * Check whether the cmdline buffer at idx has a pid
1950 * mapped. We are going to overwrite that entry so we
1951 * need to clear the map_pid_to_cmdline. Otherwise we
1952 * would read the new comm for the old pid.
1953 */
939c7a4f 1954 pid = savedcmd->map_cmdline_to_pid[idx];
a635cf04 1955 if (pid != NO_CMDLINE_MAP)
939c7a4f 1956 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
bc0c38d1 1957
939c7a4f
YY
1958 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1959 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
bc0c38d1 1960
939c7a4f 1961 savedcmd->cmdline_idx = idx;
bc0c38d1
SR
1962 }
1963
939c7a4f 1964 set_cmdline(idx, tsk->comm);
bc0c38d1 1965
0199c4e6 1966 arch_spin_unlock(&trace_cmdline_lock);
379cfdac
SRRH
1967
1968 return 1;
bc0c38d1
SR
1969}
1970
4c27e756 1971static void __trace_find_cmdline(int pid, char comm[])
bc0c38d1 1972{
bc0c38d1
SR
1973 unsigned map;
1974
4ca53085
SR
1975 if (!pid) {
1976 strcpy(comm, "<idle>");
1977 return;
1978 }
bc0c38d1 1979
74bf4076
SR
1980 if (WARN_ON_ONCE(pid < 0)) {
1981 strcpy(comm, "<XXX>");
1982 return;
1983 }
1984
4ca53085
SR
1985 if (pid > PID_MAX_DEFAULT) {
1986 strcpy(comm, "<...>");
1987 return;
1988 }
bc0c38d1 1989
939c7a4f 1990 map = savedcmd->map_pid_to_cmdline[pid];
50d88758 1991 if (map != NO_CMDLINE_MAP)
e09e2867 1992 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
50d88758
TG
1993 else
1994 strcpy(comm, "<...>");
4c27e756
SRRH
1995}
1996
1997void trace_find_cmdline(int pid, char comm[])
1998{
1999 preempt_disable();
2000 arch_spin_lock(&trace_cmdline_lock);
2001
2002 __trace_find_cmdline(pid, comm);
bc0c38d1 2003
0199c4e6 2004 arch_spin_unlock(&trace_cmdline_lock);
5b6045a9 2005 preempt_enable();
bc0c38d1
SR
2006}
2007
d914ba37
JF
2008int trace_find_tgid(int pid)
2009{
2010 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2011 return 0;
2012
2013 return tgid_map[pid];
2014}
2015
2016static int trace_save_tgid(struct task_struct *tsk)
2017{
bd45d34d
JF
2018 /* treat recording of idle task as a success */
2019 if (!tsk->pid)
2020 return 1;
2021
2022 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
d914ba37
JF
2023 return 0;
2024
2025 tgid_map[tsk->pid] = tsk->tgid;
2026 return 1;
2027}
2028
2029static bool tracing_record_taskinfo_skip(int flags)
2030{
2031 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2032 return true;
2033 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2034 return true;
2035 if (!__this_cpu_read(trace_taskinfo_save))
2036 return true;
2037 return false;
2038}
2039
2040/**
2041 * tracing_record_taskinfo - record the task info of a task
2042 *
 2043 * @task: task to record
 2044 * @flags: TRACE_RECORD_CMDLINE for recording comm
 2045 *         TRACE_RECORD_TGID for recording tgid
2046 */
2047void tracing_record_taskinfo(struct task_struct *task, int flags)
2048{
29b1a8ad
JF
2049 bool done;
2050
d914ba37
JF
2051 if (tracing_record_taskinfo_skip(flags))
2052 return;
29b1a8ad
JF
2053
2054 /*
2055 * Record as much task information as possible. If some fail, continue
2056 * to try to record the others.
2057 */
2058 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2059 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2060
2061 /* If recording any information failed, retry again soon. */
2062 if (!done)
d914ba37
JF
2063 return;
2064
2065 __this_cpu_write(trace_taskinfo_save, false);
2066}
2067
2068/**
2069 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2070 *
 2071 * @prev: previous task during sched_switch
 2072 * @next: next task during sched_switch
 2073 * @flags: TRACE_RECORD_CMDLINE for recording comm
 2074 *         TRACE_RECORD_TGID for recording tgid
2075 */
2076void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2077 struct task_struct *next, int flags)
bc0c38d1 2078{
29b1a8ad
JF
2079 bool done;
2080
d914ba37
JF
2081 if (tracing_record_taskinfo_skip(flags))
2082 return;
2083
29b1a8ad
JF
2084 /*
2085 * Record as much task information as possible. If some fail, continue
2086 * to try to record the others.
2087 */
2088 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2089 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2090 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2091 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
bc0c38d1 2092
29b1a8ad
JF
2093 /* If recording any information failed, retry again soon. */
2094 if (!done)
7ffbd48d
SR
2095 return;
2096
d914ba37
JF
2097 __this_cpu_write(trace_taskinfo_save, false);
2098}
2099
2100/* Helpers to record a specific task information */
2101void tracing_record_cmdline(struct task_struct *task)
2102{
2103 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2104}
2105
2106void tracing_record_tgid(struct task_struct *task)
2107{
2108 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
bc0c38d1
SR
2109}
2110
af0009fc
SRV
2111/*
2112 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2113 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2114 * simplifies those functions and keeps them in sync.
2115 */
2116enum print_line_t trace_handle_return(struct trace_seq *s)
2117{
2118 return trace_seq_has_overflowed(s) ?
2119 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2120}
2121EXPORT_SYMBOL_GPL(trace_handle_return);
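
/*
 * Hedged sketch of a typical caller: an event output handler writes to
 * the trace_seq and lets trace_handle_return() fold the overflow check
 * into the return value.  struct example_entry and
 * trace_example_output() are illustrative, not defined in this tree.
 */
static enum print_line_t trace_example_output(struct trace_iterator *iter,
                                              int flags, struct trace_event *event)
{
        struct example_entry {
                struct trace_entry ent;
                unsigned long value;
        } *field = (void *)iter->ent;

        trace_seq_printf(&iter->seq, "example: %lu\n", field->value);
        return trace_handle_return(&iter->seq);
}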
2122
45dcd8b8 2123void
38697053
SR
2124tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2125 int pc)
bc0c38d1
SR
2126{
2127 struct task_struct *tsk = current;
bc0c38d1 2128
777e208d
SR
2129 entry->preempt_count = pc & 0xff;
2130 entry->pid = (tsk) ? tsk->pid : 0;
2131 entry->flags =
9244489a 2132#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2e2ca155 2133 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
9244489a
SR
2134#else
2135 TRACE_FLAG_IRQS_NOSUPPORT |
2136#endif
7e6867bf 2137 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
bc0c38d1 2138 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
c59f29cb 2139 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
e5137b50
PZ
2140 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2141 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
bc0c38d1 2142}
f413cdb8 2143EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
bc0c38d1 2144
e77405ad
SR
2145struct ring_buffer_event *
2146trace_buffer_lock_reserve(struct ring_buffer *buffer,
2147 int type,
2148 unsigned long len,
2149 unsigned long flags, int pc)
51a763dd 2150{
3e9a8aad 2151 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
0fc1b09f
SRRH
2152}
2153
2154DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2155DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2156static int trace_buffered_event_ref;
2157
2158/**
2159 * trace_buffered_event_enable - enable buffering events
2160 *
2161 * When events are being filtered, it is quicker to use a temporary
2162 * buffer to write the event data into if there's a likely chance
 2163 * that it will not be committed. Discarding an already reserved
 2164 * ring buffer event is not as fast as committing, and is much
 2165 * slower than copying the data and committing it.
2166 *
 2167 * When an event is to be filtered, allocate per cpu buffers to
 2168 * write the event data into; if the event is then filtered and
 2169 * discarded it is simply dropped, otherwise the entire data is
 2170 * committed in one shot.
2171 */
2172void trace_buffered_event_enable(void)
2173{
2174 struct ring_buffer_event *event;
2175 struct page *page;
2176 int cpu;
51a763dd 2177
0fc1b09f
SRRH
2178 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2179
2180 if (trace_buffered_event_ref++)
2181 return;
2182
2183 for_each_tracing_cpu(cpu) {
2184 page = alloc_pages_node(cpu_to_node(cpu),
2185 GFP_KERNEL | __GFP_NORETRY, 0);
2186 if (!page)
2187 goto failed;
2188
2189 event = page_address(page);
2190 memset(event, 0, sizeof(*event));
2191
2192 per_cpu(trace_buffered_event, cpu) = event;
2193
2194 preempt_disable();
2195 if (cpu == smp_processor_id() &&
2196 this_cpu_read(trace_buffered_event) !=
2197 per_cpu(trace_buffered_event, cpu))
2198 WARN_ON_ONCE(1);
2199 preempt_enable();
51a763dd
ACM
2200 }
2201
0fc1b09f
SRRH
2202 return;
2203 failed:
2204 trace_buffered_event_disable();
2205}
2206
2207static void enable_trace_buffered_event(void *data)
2208{
2209 /* Probably not needed, but do it anyway */
2210 smp_rmb();
2211 this_cpu_dec(trace_buffered_event_cnt);
2212}
2213
2214static void disable_trace_buffered_event(void *data)
2215{
2216 this_cpu_inc(trace_buffered_event_cnt);
2217}
2218
2219/**
2220 * trace_buffered_event_disable - disable buffering events
2221 *
2222 * When a filter is removed, it is faster to not use the buffered
2223 * events, and to commit directly into the ring buffer. Free up
2224 * the temp buffers when there are no more users. This requires
2225 * special synchronization with current events.
2226 */
2227void trace_buffered_event_disable(void)
2228{
2229 int cpu;
2230
2231 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2232
2233 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2234 return;
2235
2236 if (--trace_buffered_event_ref)
2237 return;
2238
2239 preempt_disable();
2240 /* For each CPU, set the buffer as used. */
2241 smp_call_function_many(tracing_buffer_mask,
2242 disable_trace_buffered_event, NULL, 1);
2243 preempt_enable();
2244
2245 /* Wait for all current users to finish */
2246 synchronize_sched();
2247
2248 for_each_tracing_cpu(cpu) {
2249 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2250 per_cpu(trace_buffered_event, cpu) = NULL;
2251 }
2252 /*
2253 * Make sure trace_buffered_event is NULL before clearing
2254 * trace_buffered_event_cnt.
2255 */
2256 smp_wmb();
2257
2258 preempt_disable();
2259 /* Do the work on each cpu */
2260 smp_call_function_many(tracing_buffer_mask,
2261 enable_trace_buffered_event, NULL, 1);
2262 preempt_enable();
51a763dd 2263}
51a763dd 2264
2c4a33ab
SRRH
2265static struct ring_buffer *temp_buffer;
2266
ccb469a1
SR
2267struct ring_buffer_event *
2268trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
7f1d2f82 2269 struct trace_event_file *trace_file,
ccb469a1
SR
2270 int type, unsigned long len,
2271 unsigned long flags, int pc)
2272{
2c4a33ab 2273 struct ring_buffer_event *entry;
0fc1b09f 2274 int val;
2c4a33ab 2275
7f1d2f82 2276 *current_rb = trace_file->tr->trace_buffer.buffer;
0fc1b09f
SRRH
2277
2278 if ((trace_file->flags &
2279 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2280 (entry = this_cpu_read(trace_buffered_event))) {
2281 /* Try to use the per cpu buffer first */
2282 val = this_cpu_inc_return(trace_buffered_event_cnt);
2283 if (val == 1) {
2284 trace_event_setup(entry, type, flags, pc);
2285 entry->array[0] = len;
2286 return entry;
2287 }
2288 this_cpu_dec(trace_buffered_event_cnt);
2289 }
2290
3e9a8aad
SRRH
2291 entry = __trace_buffer_lock_reserve(*current_rb,
2292 type, len, flags, pc);
2c4a33ab
SRRH
2293 /*
2294 * If tracing is off, but we have triggers enabled
2295 * we still need to look at the event data. Use the temp_buffer
 2296 * to store the trace event for the trigger to use. It's recursion
2297 * safe and will not be recorded anywhere.
2298 */
5d6ad960 2299 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2c4a33ab 2300 *current_rb = temp_buffer;
3e9a8aad
SRRH
2301 entry = __trace_buffer_lock_reserve(*current_rb,
2302 type, len, flags, pc);
2c4a33ab
SRRH
2303 }
2304 return entry;
ccb469a1
SR
2305}
2306EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2307
42391745
SRRH
2308static DEFINE_SPINLOCK(tracepoint_iter_lock);
2309static DEFINE_MUTEX(tracepoint_printk_mutex);
2310
2311static void output_printk(struct trace_event_buffer *fbuffer)
2312{
2313 struct trace_event_call *event_call;
2314 struct trace_event *event;
2315 unsigned long flags;
2316 struct trace_iterator *iter = tracepoint_print_iter;
2317
2318 /* We should never get here if iter is NULL */
2319 if (WARN_ON_ONCE(!iter))
2320 return;
2321
2322 event_call = fbuffer->trace_file->event_call;
2323 if (!event_call || !event_call->event.funcs ||
2324 !event_call->event.funcs->trace)
2325 return;
2326
2327 event = &fbuffer->trace_file->event_call->event;
2328
2329 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2330 trace_seq_init(&iter->seq);
2331 iter->ent = fbuffer->entry;
2332 event_call->event.funcs->trace(iter, 0, event);
2333 trace_seq_putc(&iter->seq, 0);
2334 printk("%s", iter->seq.buffer);
2335
2336 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2337}
2338
2339int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2340 void __user *buffer, size_t *lenp,
2341 loff_t *ppos)
2342{
2343 int save_tracepoint_printk;
2344 int ret;
2345
2346 mutex_lock(&tracepoint_printk_mutex);
2347 save_tracepoint_printk = tracepoint_printk;
2348
2349 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2350
2351 /*
2352 * This will force exiting early, as tracepoint_printk
 2353 * is always zero when tracepoint_print_iter is not allocated
2354 */
2355 if (!tracepoint_print_iter)
2356 tracepoint_printk = 0;
2357
2358 if (save_tracepoint_printk == tracepoint_printk)
2359 goto out;
2360
2361 if (tracepoint_printk)
2362 static_key_enable(&tracepoint_printk_key.key);
2363 else
2364 static_key_disable(&tracepoint_printk_key.key);
2365
2366 out:
2367 mutex_unlock(&tracepoint_printk_mutex);
2368
2369 return ret;
2370}
2371
2372void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2373{
2374 if (static_key_false(&tracepoint_printk_key.key))
2375 output_printk(fbuffer);
2376
2377 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2378 fbuffer->event, fbuffer->entry,
2379 fbuffer->flags, fbuffer->pc);
2380}
2381EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
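
/*
 * Hedged sketch of what a TRACE_EVENT()-generated probe roughly does
 * with the reserve/commit pair above.  struct trace_event_raw_example
 * and example_probe() are illustrative; trace_event_buffer_reserve()
 * and trace_event_buffer_commit() are the real APIs.
 */
static void example_probe(void *data, int value)
{
        struct trace_event_file *trace_file = data;
        struct trace_event_buffer fbuffer;
        struct trace_event_raw_example {
                struct trace_entry ent;
                int value;
        } *entry;

        entry = trace_event_buffer_reserve(&fbuffer, trace_file,
                                           sizeof(*entry));
        if (!entry)
                return;

        entry->value = value;                   /* fill the event payload */
        trace_event_buffer_commit(&fbuffer);    /* filter/trigger aware commit */
}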
2382
2ee5b92a
SRV
2383/*
2384 * Skip 3:
2385 *
2386 * trace_buffer_unlock_commit_regs()
2387 * trace_event_buffer_commit()
2388 * trace_event_raw_event_xxx()
2389*/
2390# define STACK_SKIP 3
2391
b7f0c959
SRRH
2392void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2393 struct ring_buffer *buffer,
0d5c6e1c
SR
2394 struct ring_buffer_event *event,
2395 unsigned long flags, int pc,
2396 struct pt_regs *regs)
1fd8df2c 2397{
7ffbd48d 2398 __buffer_unlock_commit(buffer, event);
1fd8df2c 2399
be54f69c 2400 /*
2ee5b92a 2401 * If regs is not set, then skip the necessary functions.
be54f69c
SRRH
2402 * Note, we can still get here via blktrace, wakeup tracer
2403 * and mmiotrace, but that's ok if they lose a function or
2ee5b92a 2404 * two. They are not that meaningful.
be54f69c 2405 */
2ee5b92a 2406 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
1fd8df2c
MH
2407 ftrace_trace_userstack(buffer, flags, pc);
2408}
1fd8df2c 2409
52ffabe3
SRRH
2410/*
2411 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2412 */
2413void
2414trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2415 struct ring_buffer_event *event)
2416{
2417 __buffer_unlock_commit(buffer, event);
2418}
2419
478409dd
CZ
2420static void
2421trace_process_export(struct trace_export *export,
2422 struct ring_buffer_event *event)
2423{
2424 struct trace_entry *entry;
2425 unsigned int size = 0;
2426
2427 entry = ring_buffer_event_data(event);
2428 size = ring_buffer_event_length(event);
a773d419 2429 export->write(export, entry, size);
478409dd
CZ
2430}
2431
2432static DEFINE_MUTEX(ftrace_export_lock);
2433
2434static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2435
2436static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2437
2438static inline void ftrace_exports_enable(void)
2439{
2440 static_branch_enable(&ftrace_exports_enabled);
2441}
2442
2443static inline void ftrace_exports_disable(void)
2444{
2445 static_branch_disable(&ftrace_exports_enabled);
2446}
2447
2448void ftrace_exports(struct ring_buffer_event *event)
2449{
2450 struct trace_export *export;
2451
2452 preempt_disable_notrace();
2453
2454 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2455 while (export) {
2456 trace_process_export(export, event);
2457 export = rcu_dereference_raw_notrace(export->next);
2458 }
2459
2460 preempt_enable_notrace();
2461}
2462
2463static inline void
2464add_trace_export(struct trace_export **list, struct trace_export *export)
2465{
2466 rcu_assign_pointer(export->next, *list);
2467 /*
2468 * We are entering export into the list but another
2469 * CPU might be walking that list. We need to make sure
2470 * the export->next pointer is valid before another CPU sees
 2471 * the export pointer inserted into the list.
2472 */
2473 rcu_assign_pointer(*list, export);
2474}
2475
2476static inline int
2477rm_trace_export(struct trace_export **list, struct trace_export *export)
2478{
2479 struct trace_export **p;
2480
2481 for (p = list; *p != NULL; p = &(*p)->next)
2482 if (*p == export)
2483 break;
2484
2485 if (*p != export)
2486 return -1;
2487
2488 rcu_assign_pointer(*p, (*p)->next);
2489
2490 return 0;
2491}
2492
2493static inline void
2494add_ftrace_export(struct trace_export **list, struct trace_export *export)
2495{
2496 if (*list == NULL)
2497 ftrace_exports_enable();
2498
2499 add_trace_export(list, export);
2500}
2501
2502static inline int
2503rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2504{
2505 int ret;
2506
2507 ret = rm_trace_export(list, export);
2508 if (*list == NULL)
2509 ftrace_exports_disable();
2510
2511 return ret;
2512}
2513
2514int register_ftrace_export(struct trace_export *export)
2515{
2516 if (WARN_ON_ONCE(!export->write))
2517 return -1;
2518
2519 mutex_lock(&ftrace_export_lock);
2520
2521 add_ftrace_export(&ftrace_exports_list, export);
2522
2523 mutex_unlock(&ftrace_export_lock);
2524
2525 return 0;
2526}
2527EXPORT_SYMBOL_GPL(register_ftrace_export);
2528
2529int unregister_ftrace_export(struct trace_export *export)
2530{
2531 int ret;
2532
2533 mutex_lock(&ftrace_export_lock);
2534
2535 ret = rm_ftrace_export(&ftrace_exports_list, export);
2536
2537 mutex_unlock(&ftrace_export_lock);
2538
2539 return ret;
2540}
2541EXPORT_SYMBOL_GPL(unregister_ftrace_export);
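
/*
 * Hedged sketch of a minimal trace_export client (the stm_ftrace driver
 * is an in-tree user).  example_export_write() and example_export are
 * illustrative names; the ->write() signature follows <linux/trace.h>
 * in this tree.
 */
static void example_export_write(struct trace_export *export,
                                 const void *entry, unsigned int size)
{
        /* ship the raw trace entry to firmware, STM, a NIC, etc. */
}

static struct trace_export example_export = {
        .write  = example_export_write,
};

/*
 * register_ftrace_export(&example_export) starts receiving function
 * trace entries; unregister_ftrace_export(&example_export) stops them.
 */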
2542
e309b41d 2543void
7be42151 2544trace_function(struct trace_array *tr,
38697053
SR
2545 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2546 int pc)
bc0c38d1 2547{
2425bcb9 2548 struct trace_event_call *call = &event_function;
12883efb 2549 struct ring_buffer *buffer = tr->trace_buffer.buffer;
3928a8a2 2550 struct ring_buffer_event *event;
777e208d 2551 struct ftrace_entry *entry;
bc0c38d1 2552
3e9a8aad
SRRH
2553 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2554 flags, pc);
3928a8a2
SR
2555 if (!event)
2556 return;
2557 entry = ring_buffer_event_data(event);
777e208d
SR
2558 entry->ip = ip;
2559 entry->parent_ip = parent_ip;
e1112b4d 2560
478409dd
CZ
2561 if (!call_filter_check_discard(call, entry, buffer, event)) {
2562 if (static_branch_unlikely(&ftrace_exports_enabled))
2563 ftrace_exports(event);
7ffbd48d 2564 __buffer_unlock_commit(buffer, event);
478409dd 2565 }
bc0c38d1
SR
2566}
2567
c0a0d0d3 2568#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
2569
2570#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2571struct ftrace_stack {
2572 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2573};
2574
2575static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2576static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2577
e77405ad 2578static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 2579 unsigned long flags,
1fd8df2c 2580 int skip, int pc, struct pt_regs *regs)
86387f7e 2581{
2425bcb9 2582 struct trace_event_call *call = &event_kernel_stack;
3928a8a2 2583 struct ring_buffer_event *event;
777e208d 2584 struct stack_entry *entry;
86387f7e 2585 struct stack_trace trace;
4a9bd3f1
SR
2586 int use_stack;
2587 int size = FTRACE_STACK_ENTRIES;
2588
2589 trace.nr_entries = 0;
2590 trace.skip = skip;
2591
be54f69c 2592 /*
2ee5b92a 2593 * Add one, for this function and the call to save_stack_trace()
be54f69c
SRRH
2594 * If regs is set, then these functions will not be in the way.
2595 */
2ee5b92a 2596#ifndef CONFIG_UNWINDER_ORC
be54f69c 2597 if (!regs)
2ee5b92a
SRV
2598 trace.skip++;
2599#endif
be54f69c 2600
4a9bd3f1
SR
2601 /*
2602 * Since events can happen in NMIs there's no safe way to
2603 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2604 * or NMI comes in, it will just have to use the default
2605 * FTRACE_STACK_SIZE.
2606 */
2607 preempt_disable_notrace();
2608
82146529 2609 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
2610 /*
2611 * We don't need any atomic variables, just a barrier.
2612 * If an interrupt comes in, we don't care, because it would
2613 * have exited and put the counter back to what we want.
2614 * We just need a barrier to keep gcc from moving things
2615 * around.
2616 */
2617 barrier();
2618 if (use_stack == 1) {
bdffd893 2619 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
2620 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2621
2622 if (regs)
2623 save_stack_trace_regs(regs, &trace);
2624 else
2625 save_stack_trace(&trace);
2626
2627 if (trace.nr_entries > size)
2628 size = trace.nr_entries;
2629 } else
2630 /* From now on, use_stack is a boolean */
2631 use_stack = 0;
2632
2633 size *= sizeof(unsigned long);
86387f7e 2634
3e9a8aad
SRRH
2635 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2636 sizeof(*entry) + size, flags, pc);
3928a8a2 2637 if (!event)
4a9bd3f1
SR
2638 goto out;
2639 entry = ring_buffer_event_data(event);
86387f7e 2640
4a9bd3f1
SR
2641 memset(&entry->caller, 0, size);
2642
2643 if (use_stack)
2644 memcpy(&entry->caller, trace.entries,
2645 trace.nr_entries * sizeof(unsigned long));
2646 else {
2647 trace.max_entries = FTRACE_STACK_ENTRIES;
2648 trace.entries = entry->caller;
2649 if (regs)
2650 save_stack_trace_regs(regs, &trace);
2651 else
2652 save_stack_trace(&trace);
2653 }
2654
2655 entry->size = trace.nr_entries;
86387f7e 2656
f306cc82 2657 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2658 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
2659
2660 out:
2661 /* Again, don't let gcc optimize things here */
2662 barrier();
82146529 2663 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
2664 preempt_enable_notrace();
2665
f0a920d5
IM
2666}
2667
2d34f489
SRRH
2668static inline void ftrace_trace_stack(struct trace_array *tr,
2669 struct ring_buffer *buffer,
73dddbb5
SRRH
2670 unsigned long flags,
2671 int skip, int pc, struct pt_regs *regs)
53614991 2672{
2d34f489 2673 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
53614991
SR
2674 return;
2675
73dddbb5 2676 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
2677}
2678
c0a0d0d3
FW
2679void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2680 int pc)
38697053 2681{
a33d7d94
SRV
2682 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2683
2684 if (rcu_is_watching()) {
2685 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2686 return;
2687 }
2688
2689 /*
2690 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2691 * but if the above rcu_is_watching() failed, then the NMI
2692 * triggered someplace critical, and rcu_irq_enter() should
2693 * not be called from NMI.
2694 */
2695 if (unlikely(in_nmi()))
2696 return;
2697
2698 /*
2699 * It is possible that a function is being traced in a
2700 * location that RCU is not watching. A call to
 2701 * rcu_irq_enter() will make sure that it is, but there are
 2702 * a few internal rcu functions that could be traced
 2703 * where that won't work either. In those cases, we just
2704 * do nothing.
2705 */
2706 if (unlikely(rcu_irq_enter_disabled()))
2707 return;
2708
2709 rcu_irq_enter_irqson();
2710 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2711 rcu_irq_exit_irqson();
38697053
SR
2712}
2713
03889384
SR
2714/**
2715 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 2716 * @skip: Number of functions to skip (helper handlers)
03889384 2717 */
c142be8e 2718void trace_dump_stack(int skip)
03889384
SR
2719{
2720 unsigned long flags;
2721
2722 if (tracing_disabled || tracing_selftest_running)
e36c5458 2723 return;
03889384
SR
2724
2725 local_save_flags(flags);
2726
2ee5b92a
SRV
2727#ifndef CONFIG_UNWINDER_ORC
2728 /* Skip 1 to skip this function. */
2729 skip++;
2730#endif
c142be8e
SRRH
2731 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2732 flags, skip, preempt_count(), NULL);
03889384
SR
2733}
2734
91e86e56
SR
2735static DEFINE_PER_CPU(int, user_stack_count);
2736
e77405ad
SR
2737void
2738ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 2739{
2425bcb9 2740 struct trace_event_call *call = &event_user_stack;
8d7c6a96 2741 struct ring_buffer_event *event;
02b67518
TE
2742 struct userstack_entry *entry;
2743 struct stack_trace trace;
02b67518 2744
983f938a 2745 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
02b67518
TE
2746 return;
2747
b6345879
SR
2748 /*
 2749 * NMIs cannot handle page faults, even with fixups.
 2750 * Saving the user stack can (and often does) fault.
2751 */
2752 if (unlikely(in_nmi()))
2753 return;
02b67518 2754
91e86e56
SR
2755 /*
2756 * prevent recursion, since the user stack tracing may
2757 * trigger other kernel events.
2758 */
2759 preempt_disable();
2760 if (__this_cpu_read(user_stack_count))
2761 goto out;
2762
2763 __this_cpu_inc(user_stack_count);
2764
3e9a8aad
SRRH
2765 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2766 sizeof(*entry), flags, pc);
02b67518 2767 if (!event)
1dbd1951 2768 goto out_drop_count;
02b67518 2769 entry = ring_buffer_event_data(event);
02b67518 2770
48659d31 2771 entry->tgid = current->tgid;
02b67518
TE
2772 memset(&entry->caller, 0, sizeof(entry->caller));
2773
2774 trace.nr_entries = 0;
2775 trace.max_entries = FTRACE_STACK_ENTRIES;
2776 trace.skip = 0;
2777 trace.entries = entry->caller;
2778
2779 save_stack_trace_user(&trace);
f306cc82 2780 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2781 __buffer_unlock_commit(buffer, event);
91e86e56 2782
1dbd1951 2783 out_drop_count:
91e86e56 2784 __this_cpu_dec(user_stack_count);
91e86e56
SR
2785 out:
2786 preempt_enable();
02b67518
TE
2787}
2788
4fd27358
HE
2789#ifdef UNUSED
2790static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 2791{
7be42151 2792 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 2793}
4fd27358 2794#endif /* UNUSED */
02b67518 2795
c0a0d0d3
FW
2796#endif /* CONFIG_STACKTRACE */
2797
07d777fe
SR
2798/* created for use with alloc_percpu */
2799struct trace_buffer_struct {
e2ace001
AL
2800 int nesting;
2801 char buffer[4][TRACE_BUF_SIZE];
07d777fe
SR
2802};
2803
2804static struct trace_buffer_struct *trace_percpu_buffer;
07d777fe
SR
2805
2806/*
e2ace001
AL
 2807 * This allows for lockless recording. If we're nested too deeply, then
2808 * this returns NULL.
07d777fe
SR
2809 */
2810static char *get_trace_buf(void)
2811{
e2ace001 2812 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
07d777fe 2813
e2ace001 2814 if (!buffer || buffer->nesting >= 4)
07d777fe
SR
2815 return NULL;
2816
3d9622c1
SRV
2817 buffer->nesting++;
2818
2819 /* Interrupts must see nesting incremented before we use the buffer */
2820 barrier();
2821 return &buffer->buffer[buffer->nesting][0];
e2ace001
AL
2822}
2823
2824static void put_trace_buf(void)
2825{
3d9622c1
SRV
2826 /* Don't let the decrement of nesting leak before this */
2827 barrier();
e2ace001 2828 this_cpu_dec(trace_percpu_buffer->nesting);
07d777fe
SR
2829}
2830
2831static int alloc_percpu_trace_buffer(void)
2832{
2833 struct trace_buffer_struct *buffers;
07d777fe
SR
2834
2835 buffers = alloc_percpu(struct trace_buffer_struct);
e2ace001
AL
2836 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2837 return -ENOMEM;
07d777fe
SR
2838
2839 trace_percpu_buffer = buffers;
07d777fe 2840 return 0;
07d777fe
SR
2841}
2842
81698831
SR
2843static int buffers_allocated;
2844
07d777fe
SR
2845void trace_printk_init_buffers(void)
2846{
07d777fe
SR
2847 if (buffers_allocated)
2848 return;
2849
2850 if (alloc_percpu_trace_buffer())
2851 return;
2852
2184db46
SR
2853 /* trace_printk() is for debug use only. Don't use it in production. */
2854
a395d6a7
JP
2855 pr_warn("\n");
2856 pr_warn("**********************************************************\n");
2857 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2858 pr_warn("** **\n");
2859 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2860 pr_warn("** **\n");
2861 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2862 pr_warn("** unsafe for production use. **\n");
2863 pr_warn("** **\n");
2864 pr_warn("** If you see this message and you are not debugging **\n");
2865 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2866 pr_warn("** **\n");
2867 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2868 pr_warn("**********************************************************\n");
07d777fe 2869
b382ede6
SR
2870 /* Expand the buffers to set size */
2871 tracing_update_buffers();
2872
07d777fe 2873 buffers_allocated = 1;
81698831
SR
2874
2875 /*
2876 * trace_printk_init_buffers() can be called by modules.
2877 * If that happens, then we need to start cmdline recording
2878 * directly here. If the global_trace.buffer is already
2879 * allocated here, then this was called by module code.
2880 */
12883efb 2881 if (global_trace.trace_buffer.buffer)
81698831
SR
2882 tracing_start_cmdline_record();
2883}
2884
2885void trace_printk_start_comm(void)
2886{
2887 /* Start tracing comms if trace printk is set */
2888 if (!buffers_allocated)
2889 return;
2890 tracing_start_cmdline_record();
2891}
2892
2893static void trace_printk_start_stop_comm(int enabled)
2894{
2895 if (!buffers_allocated)
2896 return;
2897
2898 if (enabled)
2899 tracing_start_cmdline_record();
2900 else
2901 tracing_stop_cmdline_record();
07d777fe
SR
2902}
2903
769b0441 2904/**
48ead020 2905 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
2906 *
2907 */
40ce74f1 2908int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2909{
2425bcb9 2910 struct trace_event_call *call = &event_bprint;
769b0441 2911 struct ring_buffer_event *event;
e77405ad 2912 struct ring_buffer *buffer;
769b0441 2913 struct trace_array *tr = &global_trace;
48ead020 2914 struct bprint_entry *entry;
769b0441 2915 unsigned long flags;
07d777fe
SR
2916 char *tbuffer;
2917 int len = 0, size, pc;
769b0441
FW
2918
2919 if (unlikely(tracing_selftest_running || tracing_disabled))
2920 return 0;
2921
2922 /* Don't pollute graph traces with trace_vprintk internals */
2923 pause_graph_tracing();
2924
2925 pc = preempt_count();
5168ae50 2926 preempt_disable_notrace();
769b0441 2927
07d777fe
SR
2928 tbuffer = get_trace_buf();
2929 if (!tbuffer) {
2930 len = 0;
e2ace001 2931 goto out_nobuffer;
07d777fe 2932 }
769b0441 2933
07d777fe 2934 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2935
07d777fe
SR
2936 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2937 goto out;
769b0441 2938
07d777fe 2939 local_save_flags(flags);
769b0441 2940 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2941 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
2942 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2943 flags, pc);
769b0441 2944 if (!event)
07d777fe 2945 goto out;
769b0441
FW
2946 entry = ring_buffer_event_data(event);
2947 entry->ip = ip;
769b0441
FW
2948 entry->fmt = fmt;
2949
07d777fe 2950 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2951 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2952 __buffer_unlock_commit(buffer, event);
2d34f489 2953 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
d931369b 2954 }
769b0441 2955
769b0441 2956out:
e2ace001
AL
2957 put_trace_buf();
2958
2959out_nobuffer:
5168ae50 2960 preempt_enable_notrace();
769b0441
FW
2961 unpause_graph_tracing();
2962
2963 return len;
2964}
48ead020
FW
2965EXPORT_SYMBOL_GPL(trace_vbprintk);
2966
a085eb4b 2967__printf(3, 0)
12883efb
SRRH
2968static int
2969__trace_array_vprintk(struct ring_buffer *buffer,
2970 unsigned long ip, const char *fmt, va_list args)
48ead020 2971{
2425bcb9 2972 struct trace_event_call *call = &event_print;
48ead020 2973 struct ring_buffer_event *event;
07d777fe 2974 int len = 0, size, pc;
48ead020 2975 struct print_entry *entry;
07d777fe
SR
2976 unsigned long flags;
2977 char *tbuffer;
48ead020
FW
2978
2979 if (tracing_disabled || tracing_selftest_running)
2980 return 0;
2981
07d777fe
SR
2982 /* Don't pollute graph traces with trace_vprintk internals */
2983 pause_graph_tracing();
2984
48ead020
FW
2985 pc = preempt_count();
2986 preempt_disable_notrace();
48ead020 2987
07d777fe
SR
2988
2989 tbuffer = get_trace_buf();
2990 if (!tbuffer) {
2991 len = 0;
e2ace001 2992 goto out_nobuffer;
07d777fe 2993 }
48ead020 2994
3558a5ac 2995 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2996
07d777fe 2997 local_save_flags(flags);
48ead020 2998 size = sizeof(*entry) + len + 1;
3e9a8aad
SRRH
2999 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3000 flags, pc);
48ead020 3001 if (!event)
07d777fe 3002 goto out;
48ead020 3003 entry = ring_buffer_event_data(event);
c13d2f7c 3004 entry->ip = ip;
48ead020 3005
3558a5ac 3006 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 3007 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 3008 __buffer_unlock_commit(buffer, event);
2d34f489 3009 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
d931369b 3010 }
e2ace001
AL
3011
3012out:
3013 put_trace_buf();
3014
3015out_nobuffer:
48ead020 3016 preempt_enable_notrace();
07d777fe 3017 unpause_graph_tracing();
48ead020
FW
3018
3019 return len;
3020}
659372d3 3021
a085eb4b 3022__printf(3, 0)
12883efb
SRRH
3023int trace_array_vprintk(struct trace_array *tr,
3024 unsigned long ip, const char *fmt, va_list args)
3025{
3026 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3027}
3028
a085eb4b 3029__printf(3, 0)
12883efb
SRRH
3030int trace_array_printk(struct trace_array *tr,
3031 unsigned long ip, const char *fmt, ...)
3032{
3033 int ret;
3034 va_list ap;
3035
983f938a 3036 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
3037 return 0;
3038
3039 va_start(ap, fmt);
3040 ret = trace_array_vprintk(tr, ip, fmt, ap);
3041 va_end(ap);
3042 return ret;
3043}
3044
a085eb4b 3045__printf(3, 4)
12883efb
SRRH
3046int trace_array_printk_buf(struct ring_buffer *buffer,
3047 unsigned long ip, const char *fmt, ...)
3048{
3049 int ret;
3050 va_list ap;
3051
983f938a 3052 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
3053 return 0;
3054
3055 va_start(ap, fmt);
3056 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3057 va_end(ap);
3058 return ret;
3059}
3060
a085eb4b 3061__printf(2, 0)
659372d3
SR
3062int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3063{
a813a159 3064 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 3065}
769b0441
FW
3066EXPORT_SYMBOL_GPL(trace_vprintk);
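
/*
 * Hedged sketch: trace_printk() in <linux/kernel.h> ultimately funnels
 * into trace_vbprintk()/trace_vprintk(); a varargs wrapper would
 * forward like this.  example_trace_msg() is an illustrative name.
 */
static __printf(2, 3) int example_trace_msg(unsigned long ip,
                                            const char *fmt, ...)
{
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = trace_vprintk(ip, fmt, ap);
        va_end(ap);
        return ret;
}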
3067
e2ac8ef5 3068static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 3069{
6d158a81
SR
3070 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3071
5a90f577 3072 iter->idx++;
6d158a81
SR
3073 if (buf_iter)
3074 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
3075}
3076
e309b41d 3077static struct trace_entry *
bc21b478
SR
3078peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3079 unsigned long *lost_events)
dd0e545f 3080{
3928a8a2 3081 struct ring_buffer_event *event;
6d158a81 3082 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 3083
d769041f
SR
3084 if (buf_iter)
3085 event = ring_buffer_iter_peek(buf_iter, ts);
3086 else
12883efb 3087 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 3088 lost_events);
d769041f 3089
4a9bd3f1
SR
3090 if (event) {
3091 iter->ent_size = ring_buffer_event_length(event);
3092 return ring_buffer_event_data(event);
3093 }
3094 iter->ent_size = 0;
3095 return NULL;
dd0e545f 3096}
d769041f 3097
dd0e545f 3098static struct trace_entry *
bc21b478
SR
3099__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3100 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 3101{
12883efb 3102 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 3103 struct trace_entry *ent, *next = NULL;
aa27497c 3104 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 3105 int cpu_file = iter->cpu_file;
3928a8a2 3106 u64 next_ts = 0, ts;
bc0c38d1 3107 int next_cpu = -1;
12b5da34 3108 int next_size = 0;
bc0c38d1
SR
3109 int cpu;
3110
b04cc6b1
FW
3111 /*
3112 * If we are in a per_cpu trace file, don't bother by iterating over
3113 * all cpu and peek directly.
3114 */
ae3b5093 3115 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
3116 if (ring_buffer_empty_cpu(buffer, cpu_file))
3117 return NULL;
bc21b478 3118 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
3119 if (ent_cpu)
3120 *ent_cpu = cpu_file;
3121
3122 return ent;
3123 }
3124
ab46428c 3125 for_each_tracing_cpu(cpu) {
dd0e545f 3126
3928a8a2
SR
3127 if (ring_buffer_empty_cpu(buffer, cpu))
3128 continue;
dd0e545f 3129
bc21b478 3130 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 3131
cdd31cd2
IM
3132 /*
3133 * Pick the entry with the smallest timestamp:
3134 */
3928a8a2 3135 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
3136 next = ent;
3137 next_cpu = cpu;
3928a8a2 3138 next_ts = ts;
bc21b478 3139 next_lost = lost_events;
12b5da34 3140 next_size = iter->ent_size;
bc0c38d1
SR
3141 }
3142 }
3143
12b5da34
SR
3144 iter->ent_size = next_size;
3145
bc0c38d1
SR
3146 if (ent_cpu)
3147 *ent_cpu = next_cpu;
3148
3928a8a2
SR
3149 if (ent_ts)
3150 *ent_ts = next_ts;
3151
bc21b478
SR
3152 if (missing_events)
3153 *missing_events = next_lost;
3154
bc0c38d1
SR
3155 return next;
3156}
3157
dd0e545f 3158/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
3159struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3160 int *ent_cpu, u64 *ent_ts)
bc0c38d1 3161{
bc21b478 3162 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
3163}
3164
3165/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 3166void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 3167{
bc21b478
SR
3168 iter->ent = __find_next_entry(iter, &iter->cpu,
3169 &iter->lost_events, &iter->ts);
dd0e545f 3170
3928a8a2 3171 if (iter->ent)
e2ac8ef5 3172 trace_iterator_increment(iter);
dd0e545f 3173
3928a8a2 3174 return iter->ent ? iter : NULL;
b3806b43 3175}
bc0c38d1 3176
e309b41d 3177static void trace_consume(struct trace_iterator *iter)
b3806b43 3178{
12883efb 3179 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 3180 &iter->lost_events);
bc0c38d1
SR
3181}
3182
e309b41d 3183static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
3184{
3185 struct trace_iterator *iter = m->private;
bc0c38d1 3186 int i = (int)*pos;
4e3c3333 3187 void *ent;
bc0c38d1 3188
a63ce5b3
SR
3189 WARN_ON_ONCE(iter->leftover);
3190
bc0c38d1
SR
3191 (*pos)++;
3192
3193 /* can't go backwards */
3194 if (iter->idx > i)
3195 return NULL;
3196
3197 if (iter->idx < 0)
955b61e5 3198 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3199 else
3200 ent = iter;
3201
3202 while (ent && iter->idx < i)
955b61e5 3203 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3204
3205 iter->pos = *pos;
3206
bc0c38d1
SR
3207 return ent;
3208}
3209
955b61e5 3210void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 3211{
2f26ebd5
SR
3212 struct ring_buffer_event *event;
3213 struct ring_buffer_iter *buf_iter;
3214 unsigned long entries = 0;
3215 u64 ts;
3216
12883efb 3217 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 3218
6d158a81
SR
3219 buf_iter = trace_buffer_iter(iter, cpu);
3220 if (!buf_iter)
2f26ebd5
SR
3221 return;
3222
2f26ebd5
SR
3223 ring_buffer_iter_reset(buf_iter);
3224
3225 /*
 3226 * With the max latency tracers it is possible that a reset
 3227 * never took place on a cpu. This is evident by the
 3228 * timestamp being before the start of the buffer.
3229 */
3230 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 3231 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
3232 break;
3233 entries++;
3234 ring_buffer_read(buf_iter, NULL);
3235 }
3236
12883efb 3237 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
3238}
3239
d7350c3f 3240/*
d7350c3f
FW
 3241 * The current tracer is copied to avoid global locking
3242 * all around.
3243 */
bc0c38d1
SR
3244static void *s_start(struct seq_file *m, loff_t *pos)
3245{
3246 struct trace_iterator *iter = m->private;
2b6080f2 3247 struct trace_array *tr = iter->tr;
b04cc6b1 3248 int cpu_file = iter->cpu_file;
bc0c38d1
SR
3249 void *p = NULL;
3250 loff_t l = 0;
3928a8a2 3251 int cpu;
bc0c38d1 3252
2fd196ec
HT
3253 /*
3254 * copy the tracer to avoid using a global lock all around.
 3255 * iter->trace is a copy of current_trace; the pointer to the
 3256 * name may be used instead of a strcmp(), as iter->trace->name
3257 * will point to the same string as current_trace->name.
3258 */
bc0c38d1 3259 mutex_lock(&trace_types_lock);
2b6080f2
SR
3260 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3261 *iter->trace = *tr->current_trace;
d7350c3f 3262 mutex_unlock(&trace_types_lock);
bc0c38d1 3263
12883efb 3264#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3265 if (iter->snapshot && iter->trace->use_max_tr)
3266 return ERR_PTR(-EBUSY);
12883efb 3267#endif
debdd57f
HT
3268
3269 if (!iter->snapshot)
d914ba37 3270 atomic_inc(&trace_record_taskinfo_disabled);
bc0c38d1 3271
bc0c38d1
SR
3272 if (*pos != iter->pos) {
3273 iter->ent = NULL;
3274 iter->cpu = 0;
3275 iter->idx = -1;
3276
ae3b5093 3277 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3278 for_each_tracing_cpu(cpu)
2f26ebd5 3279 tracing_iter_reset(iter, cpu);
b04cc6b1 3280 } else
2f26ebd5 3281 tracing_iter_reset(iter, cpu_file);
bc0c38d1 3282
ac91d854 3283 iter->leftover = 0;
bc0c38d1
SR
3284 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3285 ;
3286
3287 } else {
a63ce5b3
SR
3288 /*
3289 * If we overflowed the seq_file before, then we want
3290 * to just reuse the trace_seq buffer again.
3291 */
3292 if (iter->leftover)
3293 p = iter;
3294 else {
3295 l = *pos - 1;
3296 p = s_next(m, p, &l);
3297 }
bc0c38d1
SR
3298 }
3299
4f535968 3300 trace_event_read_lock();
7e53bd42 3301 trace_access_lock(cpu_file);
bc0c38d1
SR
3302 return p;
3303}
3304
3305static void s_stop(struct seq_file *m, void *p)
3306{
7e53bd42
LJ
3307 struct trace_iterator *iter = m->private;
3308
12883efb 3309#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3310 if (iter->snapshot && iter->trace->use_max_tr)
3311 return;
12883efb 3312#endif
debdd57f
HT
3313
3314 if (!iter->snapshot)
d914ba37 3315 atomic_dec(&trace_record_taskinfo_disabled);
12883efb 3316
7e53bd42 3317 trace_access_unlock(iter->cpu_file);
4f535968 3318 trace_event_read_unlock();
bc0c38d1
SR
3319}
3320
39eaf7ef 3321static void
12883efb
SRRH
3322get_total_entries(struct trace_buffer *buf,
3323 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
3324{
3325 unsigned long count;
3326 int cpu;
3327
3328 *total = 0;
3329 *entries = 0;
3330
3331 for_each_tracing_cpu(cpu) {
12883efb 3332 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
3333 /*
3334 * If this buffer has skipped entries, then we hold all
3335 * entries for the trace and we need to ignore the
3336 * ones before the time stamp.
3337 */
12883efb
SRRH
3338 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3339 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
3340 /* total is the same as the entries */
3341 *total += count;
3342 } else
3343 *total += count +
12883efb 3344 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
3345 *entries += count;
3346 }
3347}
3348
e309b41d 3349static void print_lat_help_header(struct seq_file *m)
bc0c38d1 3350{
d79ac28f
RV
3351 seq_puts(m, "# _------=> CPU# \n"
3352 "# / _-----=> irqs-off \n"
3353 "# | / _----=> need-resched \n"
3354 "# || / _---=> hardirq/softirq \n"
3355 "# ||| / _--=> preempt-depth \n"
3356 "# |||| / delay \n"
3357 "# cmd pid ||||| time | caller \n"
3358 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
3359}
3360
12883efb 3361static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 3362{
39eaf7ef
SR
3363 unsigned long total;
3364 unsigned long entries;
3365
12883efb 3366 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
3367 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3368 entries, total, num_online_cpus());
3369 seq_puts(m, "#\n");
3370}
3371
441dae8f
JF
3372static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3373 unsigned int flags)
39eaf7ef 3374{
441dae8f
JF
3375 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3376
12883efb 3377 print_event_info(buf, m);
441dae8f 3378
9eb0f039
JFG
3379 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3380 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
bc0c38d1
SR
3381}
3382
441dae8f
JF
3383static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3384 unsigned int flags)
77271ce4 3385{
441dae8f 3386 bool tgid = flags & TRACE_ITER_RECORD_TGID;
b11fb737
SRV
3387 const char tgid_space[] = " ";
3388 const char space[] = " ";
3389
3390 seq_printf(m, "# %s _-----=> irqs-off\n",
3391 tgid ? tgid_space : space);
3392 seq_printf(m, "# %s / _----=> need-resched\n",
3393 tgid ? tgid_space : space);
3394 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3395 tgid ? tgid_space : space);
3396 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3397 tgid ? tgid_space : space);
3398 seq_printf(m, "# %s||| / delay\n",
3399 tgid ? tgid_space : space);
9eb0f039 3400 seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
b11fb737 3401 tgid ? " TGID " : space);
9eb0f039 3402 seq_printf(m, "# | | %s | |||| | |\n",
b11fb737 3403 tgid ? " | " : space);
77271ce4 3404}
bc0c38d1 3405
62b915f1 3406void
bc0c38d1
SR
3407print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3408{
983f938a 3409 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
3410 struct trace_buffer *buf = iter->trace_buffer;
3411 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 3412 struct tracer *type = iter->trace;
39eaf7ef
SR
3413 unsigned long entries;
3414 unsigned long total;
bc0c38d1
SR
3415 const char *name = "preemption";
3416
d840f718 3417 name = type->name;
bc0c38d1 3418
12883efb 3419 get_total_entries(buf, &total, &entries);
bc0c38d1 3420
888b55dc 3421 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 3422 name, UTS_RELEASE);
888b55dc 3423 seq_puts(m, "# -----------------------------------"
bc0c38d1 3424 "---------------------------------\n");
888b55dc 3425 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 3426 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 3427 nsecs_to_usecs(data->saved_latency),
bc0c38d1 3428 entries,
4c11d7ae 3429 total,
12883efb 3430 buf->cpu,
bc0c38d1
SR
3431#if defined(CONFIG_PREEMPT_NONE)
3432 "server",
3433#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3434 "desktop",
b5c21b45 3435#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
3436 "preempt",
3437#else
3438 "unknown",
3439#endif
3440 /* These are reserved for later use */
3441 0, 0, 0, 0);
3442#ifdef CONFIG_SMP
3443 seq_printf(m, " #P:%d)\n", num_online_cpus());
3444#else
3445 seq_puts(m, ")\n");
3446#endif
888b55dc
KM
3447 seq_puts(m, "# -----------------\n");
3448 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 3449 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
3450 data->comm, data->pid,
3451 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 3452 data->policy, data->rt_priority);
888b55dc 3453 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
3454
3455 if (data->critical_start) {
888b55dc 3456 seq_puts(m, "# => started at: ");
214023c3
SR
3457 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3458 trace_print_seq(m, &iter->seq);
888b55dc 3459 seq_puts(m, "\n# => ended at: ");
214023c3
SR
3460 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3461 trace_print_seq(m, &iter->seq);
8248ac05 3462 seq_puts(m, "\n#\n");
bc0c38d1
SR
3463 }
3464
888b55dc 3465 seq_puts(m, "#\n");
bc0c38d1
SR
3466}
3467
a309720c
SR
3468static void test_cpu_buff_start(struct trace_iterator *iter)
3469{
3470 struct trace_seq *s = &iter->seq;
983f938a 3471 struct trace_array *tr = iter->tr;
a309720c 3472
983f938a 3473 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
12ef7d44
SR
3474 return;
3475
3476 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3477 return;
3478
4dbbe2d8
MK
3479 if (cpumask_available(iter->started) &&
3480 cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
3481 return;
3482
12883efb 3483 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
3484 return;
3485
4dbbe2d8 3486 if (cpumask_available(iter->started))
919cd979 3487 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
3488
3489 /* Don't print started cpu buffer for the first entry of the trace */
3490 if (iter->idx > 1)
3491 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3492 iter->cpu);
a309720c
SR
3493}
3494
2c4f035f 3495static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 3496{
983f938a 3497 struct trace_array *tr = iter->tr;
214023c3 3498 struct trace_seq *s = &iter->seq;
983f938a 3499 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 3500 struct trace_entry *entry;
f633cef0 3501 struct trace_event *event;
bc0c38d1 3502
4e3c3333 3503 entry = iter->ent;
dd0e545f 3504
a309720c
SR
3505 test_cpu_buff_start(iter);
3506
c4a8e8be 3507 event = ftrace_find_event(entry->type);
bc0c38d1 3508
983f938a 3509 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3510 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3511 trace_print_lat_context(iter);
3512 else
3513 trace_print_context(iter);
c4a8e8be 3514 }
bc0c38d1 3515
19a7fe20
SRRH
3516 if (trace_seq_has_overflowed(s))
3517 return TRACE_TYPE_PARTIAL_LINE;
3518
268ccda0 3519 if (event)
a9a57763 3520 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 3521
19a7fe20 3522 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 3523
19a7fe20 3524 return trace_handle_return(s);
bc0c38d1
SR
3525}
3526
2c4f035f 3527static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3 3528{
983f938a 3529 struct trace_array *tr = iter->tr;
f9896bf3
IM
3530 struct trace_seq *s = &iter->seq;
3531 struct trace_entry *entry;
f633cef0 3532 struct trace_event *event;
f9896bf3
IM
3533
3534 entry = iter->ent;
dd0e545f 3535
983f938a 3536 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
19a7fe20
SRRH
3537 trace_seq_printf(s, "%d %d %llu ",
3538 entry->pid, iter->cpu, iter->ts);
3539
3540 if (trace_seq_has_overflowed(s))
3541 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 3542
f633cef0 3543 event = ftrace_find_event(entry->type);
268ccda0 3544 if (event)
a9a57763 3545 return event->funcs->raw(iter, 0, event);
d9793bd8 3546
19a7fe20 3547 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 3548
19a7fe20 3549 return trace_handle_return(s);
f9896bf3
IM
3550}
3551
2c4f035f 3552static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec 3553{
983f938a 3554 struct trace_array *tr = iter->tr;
5e3ca0ec
IM
3555 struct trace_seq *s = &iter->seq;
3556 unsigned char newline = '\n';
3557 struct trace_entry *entry;
f633cef0 3558 struct trace_event *event;
5e3ca0ec
IM
3559
3560 entry = iter->ent;
dd0e545f 3561
983f938a 3562 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3563 SEQ_PUT_HEX_FIELD(s, entry->pid);
3564 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3565 SEQ_PUT_HEX_FIELD(s, iter->ts);
3566 if (trace_seq_has_overflowed(s))
3567 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3568 }
5e3ca0ec 3569
f633cef0 3570 event = ftrace_find_event(entry->type);
268ccda0 3571 if (event) {
a9a57763 3572 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
3573 if (ret != TRACE_TYPE_HANDLED)
3574 return ret;
3575 }
7104f300 3576
19a7fe20 3577 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 3578
19a7fe20 3579 return trace_handle_return(s);
5e3ca0ec
IM
3580}
3581
2c4f035f 3582static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa 3583{
983f938a 3584 struct trace_array *tr = iter->tr;
cb0f12aa
IM
3585 struct trace_seq *s = &iter->seq;
3586 struct trace_entry *entry;
f633cef0 3587 struct trace_event *event;
cb0f12aa
IM
3588
3589 entry = iter->ent;
dd0e545f 3590
983f938a 3591 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3592 SEQ_PUT_FIELD(s, entry->pid);
3593 SEQ_PUT_FIELD(s, iter->cpu);
3594 SEQ_PUT_FIELD(s, iter->ts);
3595 if (trace_seq_has_overflowed(s))
3596 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3597 }
cb0f12aa 3598
f633cef0 3599 event = ftrace_find_event(entry->type);
a9a57763
SR
3600 return event ? event->funcs->binary(iter, 0, event) :
3601 TRACE_TYPE_HANDLED;
cb0f12aa
IM
3602}
3603
62b915f1 3604int trace_empty(struct trace_iterator *iter)
bc0c38d1 3605{
6d158a81 3606 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
3607 int cpu;
3608
9aba60fe 3609 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 3610 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 3611 cpu = iter->cpu_file;
6d158a81
SR
3612 buf_iter = trace_buffer_iter(iter, cpu);
3613 if (buf_iter) {
3614 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
3615 return 0;
3616 } else {
12883efb 3617 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
3618 return 0;
3619 }
3620 return 1;
3621 }
3622
ab46428c 3623 for_each_tracing_cpu(cpu) {
6d158a81
SR
3624 buf_iter = trace_buffer_iter(iter, cpu);
3625 if (buf_iter) {
3626 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
3627 return 0;
3628 } else {
12883efb 3629 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
3630 return 0;
3631 }
bc0c38d1 3632 }
d769041f 3633
797d3712 3634 return 1;
bc0c38d1
SR
3635}
3636
4f535968 3637/* Called with trace_event_read_lock() held. */
955b61e5 3638enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 3639{
983f938a
SRRH
3640 struct trace_array *tr = iter->tr;
3641 unsigned long trace_flags = tr->trace_flags;
2c4f035f
FW
3642 enum print_line_t ret;
3643
19a7fe20
SRRH
3644 if (iter->lost_events) {
3645 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3646 iter->cpu, iter->lost_events);
3647 if (trace_seq_has_overflowed(&iter->seq))
3648 return TRACE_TYPE_PARTIAL_LINE;
3649 }
bc21b478 3650
2c4f035f
FW
3651 if (iter->trace && iter->trace->print_line) {
3652 ret = iter->trace->print_line(iter);
3653 if (ret != TRACE_TYPE_UNHANDLED)
3654 return ret;
3655 }
72829bc3 3656
09ae7234
SRRH
3657 if (iter->ent->type == TRACE_BPUTS &&
3658 trace_flags & TRACE_ITER_PRINTK &&
3659 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3660 return trace_print_bputs_msg_only(iter);
3661
48ead020
FW
3662 if (iter->ent->type == TRACE_BPRINT &&
3663 trace_flags & TRACE_ITER_PRINTK &&
3664 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3665 return trace_print_bprintk_msg_only(iter);
48ead020 3666
66896a85
FW
3667 if (iter->ent->type == TRACE_PRINT &&
3668 trace_flags & TRACE_ITER_PRINTK &&
3669 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3670 return trace_print_printk_msg_only(iter);
66896a85 3671
cb0f12aa
IM
3672 if (trace_flags & TRACE_ITER_BIN)
3673 return print_bin_fmt(iter);
3674
5e3ca0ec
IM
3675 if (trace_flags & TRACE_ITER_HEX)
3676 return print_hex_fmt(iter);
3677
f9896bf3
IM
3678 if (trace_flags & TRACE_ITER_RAW)
3679 return print_raw_fmt(iter);
3680
f9896bf3
IM
3681 return print_trace_fmt(iter);
3682}
3683
7e9a49ef
JO
3684void trace_latency_header(struct seq_file *m)
3685{
3686 struct trace_iterator *iter = m->private;
983f938a 3687 struct trace_array *tr = iter->tr;
7e9a49ef
JO
3688
3689 /* print nothing if the buffers are empty */
3690 if (trace_empty(iter))
3691 return;
3692
3693 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3694 print_trace_header(m, iter);
3695
983f938a 3696 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
7e9a49ef
JO
3697 print_lat_help_header(m);
3698}
3699
62b915f1
JO
3700void trace_default_header(struct seq_file *m)
3701{
3702 struct trace_iterator *iter = m->private;
983f938a
SRRH
3703 struct trace_array *tr = iter->tr;
3704 unsigned long trace_flags = tr->trace_flags;
62b915f1 3705
f56e7f8e
JO
3706 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3707 return;
3708
62b915f1
JO
3709 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3710 /* print nothing if the buffers are empty */
3711 if (trace_empty(iter))
3712 return;
3713 print_trace_header(m, iter);
3714 if (!(trace_flags & TRACE_ITER_VERBOSE))
3715 print_lat_help_header(m);
3716 } else {
77271ce4
SR
3717 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3718 if (trace_flags & TRACE_ITER_IRQ_INFO)
441dae8f
JF
3719 print_func_help_header_irq(iter->trace_buffer,
3720 m, trace_flags);
77271ce4 3721 else
441dae8f
JF
3722 print_func_help_header(iter->trace_buffer, m,
3723 trace_flags);
77271ce4 3724 }
62b915f1
JO
3725 }
3726}
3727
e0a413f6
SR
3728static void test_ftrace_alive(struct seq_file *m)
3729{
3730 if (!ftrace_is_dead())
3731 return;
d79ac28f
RV
3732 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3733 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
3734}
3735
d8741e2e 3736#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 3737static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 3738{
d79ac28f
RV
3739 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3740 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3741 "# Takes a snapshot of the main buffer.\n"
3742 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3743 "# (Doesn't have to be '2' works with any number that\n"
3744 "# is not a '0' or '1')\n");
d8741e2e 3745}
f1affcaa
SRRH
3746
3747static void show_snapshot_percpu_help(struct seq_file *m)
3748{
fa6f0cc7 3749 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 3750#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
3751 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3752 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 3753#else
d79ac28f
RV
3754 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3755 "# Must use main snapshot file to allocate.\n");
f1affcaa 3756#endif
d79ac28f
RV
3757 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3758 "# (Doesn't have to be '2' works with any number that\n"
3759 "# is not a '0' or '1')\n");
f1affcaa
SRRH
3760}
3761
d8741e2e
SRRH
3762static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3763{
45ad21ca 3764 if (iter->tr->allocated_snapshot)
fa6f0cc7 3765 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 3766 else
fa6f0cc7 3767 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 3768
fa6f0cc7 3769 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
3770 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3771 show_snapshot_main_help(m);
3772 else
3773 show_snapshot_percpu_help(m);
d8741e2e
SRRH
3774}
3775#else
3776/* Should never be called */
3777static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3778#endif
3779
bc0c38d1
SR
3780static int s_show(struct seq_file *m, void *v)
3781{
3782 struct trace_iterator *iter = v;
a63ce5b3 3783 int ret;
bc0c38d1
SR
3784
3785 if (iter->ent == NULL) {
3786 if (iter->tr) {
3787 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3788 seq_puts(m, "#\n");
e0a413f6 3789 test_ftrace_alive(m);
bc0c38d1 3790 }
d8741e2e
SRRH
3791 if (iter->snapshot && trace_empty(iter))
3792 print_snapshot_help(m, iter);
3793 else if (iter->trace && iter->trace->print_header)
8bba1bf5 3794 iter->trace->print_header(m);
62b915f1
JO
3795 else
3796 trace_default_header(m);
3797
a63ce5b3
SR
3798 } else if (iter->leftover) {
3799 /*
3800 * If we filled the seq_file buffer earlier, we
3801 * want to just show it now.
3802 */
3803 ret = trace_print_seq(m, &iter->seq);
3804
3805 /* ret should this time be zero, but you never know */
3806 iter->leftover = ret;
3807
bc0c38d1 3808 } else {
f9896bf3 3809 print_trace_line(iter);
a63ce5b3
SR
3810 ret = trace_print_seq(m, &iter->seq);
3811 /*
3812 * If we overflow the seq_file buffer, then it will
3813 * ask us for this data again at start up.
3814 * Use that instead.
3815 * ret is 0 if seq_file write succeeded.
3816 * -1 otherwise.
3817 */
3818 iter->leftover = ret;
bc0c38d1
SR
3819 }
3820
3821 return 0;
3822}
3823
649e9c70
ON
3824/*
3825 * Should be used after trace_array_get(), trace_types_lock
3826 * ensures that i_cdev was already initialized.
3827 */
3828static inline int tracing_get_cpu(struct inode *inode)
3829{
3830 if (inode->i_cdev) /* See trace_create_cpu_file() */
3831 return (long)inode->i_cdev - 1;
3832 return RING_BUFFER_ALL_CPUS;
3833}
3834
88e9d34c 3835static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3836 .start = s_start,
3837 .next = s_next,
3838 .stop = s_stop,
3839 .show = s_show,
bc0c38d1
SR
3840};
3841
e309b41d 3842static struct trace_iterator *
6484c71c 3843__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3844{
6484c71c 3845 struct trace_array *tr = inode->i_private;
bc0c38d1 3846 struct trace_iterator *iter;
50e18b94 3847 int cpu;
bc0c38d1 3848
85a2f9b4
SR
3849 if (tracing_disabled)
3850 return ERR_PTR(-ENODEV);
60a11774 3851
50e18b94 3852 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3853 if (!iter)
3854 return ERR_PTR(-ENOMEM);
bc0c38d1 3855
72917235 3856 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 3857 GFP_KERNEL);
93574fcc
DC
3858 if (!iter->buffer_iter)
3859 goto release;
3860
d7350c3f
FW
3861 /*
3862 * We make a copy of the current tracer to avoid concurrent
3863 * changes on it while we are reading.
3864 */
bc0c38d1 3865 mutex_lock(&trace_types_lock);
d7350c3f 3866 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3867 if (!iter->trace)
d7350c3f 3868 goto fail;
85a2f9b4 3869
2b6080f2 3870 *iter->trace = *tr->current_trace;
d7350c3f 3871
79f55997 3872 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3873 goto fail;
3874
12883efb
SRRH
3875 iter->tr = tr;
3876
3877#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3878 /* Currently only the top directory has a snapshot */
3879 if (tr->current_trace->print_max || snapshot)
12883efb 3880 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3881 else
12883efb
SRRH
3882#endif
3883 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3884 iter->snapshot = snapshot;
bc0c38d1 3885 iter->pos = -1;
6484c71c 3886 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3887 mutex_init(&iter->mutex);
bc0c38d1 3888
8bba1bf5
MM
3889 /* Notify the tracer early; before we stop tracing. */
3890 if (iter->trace && iter->trace->open)
a93751ca 3891 iter->trace->open(iter);
8bba1bf5 3892
12ef7d44 3893 /* Annotate start of buffers if we had overruns */
12883efb 3894 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3895 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3896
8be0709f 3897 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3898 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3899 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3900
debdd57f
HT
3901 /* stop the trace while dumping if we are not opening "snapshot" */
3902 if (!iter->snapshot)
2b6080f2 3903 tracing_stop_tr(tr);
2f26ebd5 3904
ae3b5093 3905 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3906 for_each_tracing_cpu(cpu) {
b04cc6b1 3907 iter->buffer_iter[cpu] =
12883efb 3908 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3909 }
3910 ring_buffer_read_prepare_sync();
3911 for_each_tracing_cpu(cpu) {
3912 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3913 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3914 }
3915 } else {
3916 cpu = iter->cpu_file;
3928a8a2 3917 iter->buffer_iter[cpu] =
12883efb 3918 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3919 ring_buffer_read_prepare_sync();
3920 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3921 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3922 }
3923
bc0c38d1
SR
3924 mutex_unlock(&trace_types_lock);
3925
bc0c38d1 3926 return iter;
3928a8a2 3927
d7350c3f 3928 fail:
3928a8a2 3929 mutex_unlock(&trace_types_lock);
d7350c3f 3930 kfree(iter->trace);
6d158a81 3931 kfree(iter->buffer_iter);
93574fcc 3932release:
50e18b94
JO
3933 seq_release_private(inode, file);
3934 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3935}
3936
3937int tracing_open_generic(struct inode *inode, struct file *filp)
3938{
60a11774
SR
3939 if (tracing_disabled)
3940 return -ENODEV;
3941
bc0c38d1
SR
3942 filp->private_data = inode->i_private;
3943 return 0;
3944}
3945
2e86421d
GB
3946bool tracing_is_disabled(void)
3947{
 3948 return (tracing_disabled) ? true : false;
3949}
3950
7b85af63
SRRH
3951/*
3952 * Open and update trace_array ref count.
3953 * Must have the current trace_array passed to it.
3954 */
dcc30223 3955static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3956{
3957 struct trace_array *tr = inode->i_private;
3958
3959 if (tracing_disabled)
3960 return -ENODEV;
3961
3962 if (trace_array_get(tr) < 0)
3963 return -ENODEV;
3964
3965 filp->private_data = inode->i_private;
3966
3967 return 0;
7b85af63
SRRH
3968}
3969
4fd27358 3970static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3971{
6484c71c 3972 struct trace_array *tr = inode->i_private;
907f2784 3973 struct seq_file *m = file->private_data;
4acd4d00 3974 struct trace_iterator *iter;
3928a8a2 3975 int cpu;
bc0c38d1 3976
ff451961 3977 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3978 trace_array_put(tr);
4acd4d00 3979 return 0;
ff451961 3980 }
4acd4d00 3981
6484c71c 3982 /* Writes do not use seq_file */
4acd4d00 3983 iter = m->private;
bc0c38d1 3984 mutex_lock(&trace_types_lock);
a695cb58 3985
3928a8a2
SR
3986 for_each_tracing_cpu(cpu) {
3987 if (iter->buffer_iter[cpu])
3988 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3989 }
3990
bc0c38d1
SR
3991 if (iter->trace && iter->trace->close)
3992 iter->trace->close(iter);
3993
debdd57f
HT
3994 if (!iter->snapshot)
3995 /* reenable tracing if it was previously enabled */
2b6080f2 3996 tracing_start_tr(tr);
f77d09a3
AL
3997
3998 __trace_array_put(tr);
3999
bc0c38d1
SR
4000 mutex_unlock(&trace_types_lock);
4001
d7350c3f 4002 mutex_destroy(&iter->mutex);
b0dfa978 4003 free_cpumask_var(iter->started);
d7350c3f 4004 kfree(iter->trace);
6d158a81 4005 kfree(iter->buffer_iter);
50e18b94 4006 seq_release_private(inode, file);
ff451961 4007
bc0c38d1
SR
4008 return 0;
4009}
4010
7b85af63
SRRH
4011static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4012{
4013 struct trace_array *tr = inode->i_private;
4014
4015 trace_array_put(tr);
bc0c38d1
SR
4016 return 0;
4017}
4018
7b85af63
SRRH
4019static int tracing_single_release_tr(struct inode *inode, struct file *file)
4020{
4021 struct trace_array *tr = inode->i_private;
4022
4023 trace_array_put(tr);
4024
4025 return single_release(inode, file);
4026}
4027
bc0c38d1
SR
4028static int tracing_open(struct inode *inode, struct file *file)
4029{
6484c71c 4030 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
4031 struct trace_iterator *iter;
4032 int ret = 0;
bc0c38d1 4033
ff451961
SRRH
4034 if (trace_array_get(tr) < 0)
4035 return -ENODEV;
4036
4acd4d00 4037 /* If this file was open for write, then erase contents */
6484c71c
ON
4038 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4039 int cpu = tracing_get_cpu(inode);
8dd33bcb
BY
4040 struct trace_buffer *trace_buf = &tr->trace_buffer;
4041
4042#ifdef CONFIG_TRACER_MAX_TRACE
4043 if (tr->current_trace->print_max)
4044 trace_buf = &tr->max_buffer;
4045#endif
6484c71c
ON
4046
4047 if (cpu == RING_BUFFER_ALL_CPUS)
8dd33bcb 4048 tracing_reset_online_cpus(trace_buf);
4acd4d00 4049 else
8dd33bcb 4050 tracing_reset(trace_buf, cpu);
4acd4d00 4051 }
bc0c38d1 4052
4acd4d00 4053 if (file->f_mode & FMODE_READ) {
6484c71c 4054 iter = __tracing_open(inode, file, false);
4acd4d00
SR
4055 if (IS_ERR(iter))
4056 ret = PTR_ERR(iter);
983f938a 4057 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4acd4d00
SR
4058 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4059 }
ff451961
SRRH
4060
4061 if (ret < 0)
4062 trace_array_put(tr);
4063
bc0c38d1
SR
4064 return ret;
4065}
4066
607e2ea1
SRRH
4067/*
4068 * Some tracers are not suitable for instance buffers.
4069 * A tracer is always available for the global array (toplevel)
4070 * or if it explicitly states that it is.
4071 */
4072static bool
4073trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4074{
4075 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4076}
4077
4078/* Find the next tracer that this trace array may use */
4079static struct tracer *
4080get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4081{
4082 while (t && !trace_ok_for_array(t, tr))
4083 t = t->next;
4084
4085 return t;
4086}
4087
e309b41d 4088static void *
bc0c38d1
SR
4089t_next(struct seq_file *m, void *v, loff_t *pos)
4090{
607e2ea1 4091 struct trace_array *tr = m->private;
f129e965 4092 struct tracer *t = v;
bc0c38d1
SR
4093
4094 (*pos)++;
4095
4096 if (t)
607e2ea1 4097 t = get_tracer_for_array(tr, t->next);
bc0c38d1 4098
bc0c38d1
SR
4099 return t;
4100}
4101
4102static void *t_start(struct seq_file *m, loff_t *pos)
4103{
607e2ea1 4104 struct trace_array *tr = m->private;
f129e965 4105 struct tracer *t;
bc0c38d1
SR
4106 loff_t l = 0;
4107
4108 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
4109
4110 t = get_tracer_for_array(tr, trace_types);
4111 for (; t && l < *pos; t = t_next(m, t, &l))
4112 ;
bc0c38d1
SR
4113
4114 return t;
4115}
4116
4117static void t_stop(struct seq_file *m, void *p)
4118{
4119 mutex_unlock(&trace_types_lock);
4120}
4121
4122static int t_show(struct seq_file *m, void *v)
4123{
4124 struct tracer *t = v;
4125
4126 if (!t)
4127 return 0;
4128
fa6f0cc7 4129 seq_puts(m, t->name);
bc0c38d1
SR
4130 if (t->next)
4131 seq_putc(m, ' ');
4132 else
4133 seq_putc(m, '\n');
4134
4135 return 0;
4136}
4137
88e9d34c 4138static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
4139 .start = t_start,
4140 .next = t_next,
4141 .stop = t_stop,
4142 .show = t_show,
bc0c38d1
SR
4143};
4144
4145static int show_traces_open(struct inode *inode, struct file *file)
4146{
607e2ea1
SRRH
4147 struct trace_array *tr = inode->i_private;
4148 struct seq_file *m;
4149 int ret;
4150
60a11774
SR
4151 if (tracing_disabled)
4152 return -ENODEV;
4153
607e2ea1
SRRH
4154 ret = seq_open(file, &show_traces_seq_ops);
4155 if (ret)
4156 return ret;
4157
4158 m = file->private_data;
4159 m->private = tr;
4160
4161 return 0;
bc0c38d1
SR
4162}
4163
4acd4d00
SR
4164static ssize_t
4165tracing_write_stub(struct file *filp, const char __user *ubuf,
4166 size_t count, loff_t *ppos)
4167{
4168 return count;
4169}
4170
098c879e 4171loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 4172{
098c879e
SRRH
4173 int ret;
4174
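	/* write-only opens have no seq_file to seek; just reset the file position */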
364829b1 4175 if (file->f_mode & FMODE_READ)
098c879e 4176 ret = seq_lseek(file, offset, whence);
364829b1 4177 else
098c879e
SRRH
4178 file->f_pos = ret = 0;
4179
4180 return ret;
364829b1
SP
4181}
4182
5e2336a0 4183static const struct file_operations tracing_fops = {
4bf39a94
IM
4184 .open = tracing_open,
4185 .read = seq_read,
4acd4d00 4186 .write = tracing_write_stub,
098c879e 4187 .llseek = tracing_lseek,
4bf39a94 4188 .release = tracing_release,
bc0c38d1
SR
4189};
4190
5e2336a0 4191static const struct file_operations show_traces_fops = {
c7078de1
IM
4192 .open = show_traces_open,
4193 .read = seq_read,
4194 .release = seq_release,
b444786f 4195 .llseek = seq_lseek,
c7078de1
IM
4196};
4197
4198static ssize_t
4199tracing_cpumask_read(struct file *filp, char __user *ubuf,
4200 size_t count, loff_t *ppos)
4201{
ccfe9e42 4202 struct trace_array *tr = file_inode(filp)->i_private;
90e406f9 4203 char *mask_str;
36dfe925 4204 int len;
c7078de1 4205
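	/* snprintf() with a NULL buffer only computes the length needed for the mask string */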
90e406f9
CD
4206 len = snprintf(NULL, 0, "%*pb\n",
4207 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4208 mask_str = kmalloc(len, GFP_KERNEL);
4209 if (!mask_str)
4210 return -ENOMEM;
36dfe925 4211
90e406f9 4212 len = snprintf(mask_str, len, "%*pb\n",
1a40243b
TH
4213 cpumask_pr_args(tr->tracing_cpumask));
4214 if (len >= count) {
36dfe925
IM
4215 count = -EINVAL;
4216 goto out_err;
4217 }
90e406f9 4218 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
36dfe925
IM
4219
4220out_err:
90e406f9 4221 kfree(mask_str);
c7078de1
IM
4222
4223 return count;
4224}
4225
4226static ssize_t
4227tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4228 size_t count, loff_t *ppos)
4229{
ccfe9e42 4230 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 4231 cpumask_var_t tracing_cpumask_new;
2b6080f2 4232 int err, cpu;
9e01c1b7
RR
4233
4234 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4235 return -ENOMEM;
c7078de1 4236
9e01c1b7 4237 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 4238 if (err)
36dfe925
IM
4239 goto err_unlock;
4240
a5e25883 4241 local_irq_disable();
0b9b12c1 4242 arch_spin_lock(&tr->max_lock);
ab46428c 4243 for_each_tracing_cpu(cpu) {
36dfe925
IM
4244 /*
4245 * Increase/decrease the disabled counter if we are
4246 * about to flip a bit in the cpumask:
4247 */
ccfe9e42 4248 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4249 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4250 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4251 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 4252 }
ccfe9e42 4253 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4254 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4255 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4256 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
4257 }
4258 }
0b9b12c1 4259 arch_spin_unlock(&tr->max_lock);
a5e25883 4260 local_irq_enable();
36dfe925 4261
ccfe9e42 4262 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
9e01c1b7 4263 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
4264
4265 return count;
36dfe925
IM
4266
4267err_unlock:
215368e8 4268 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
4269
4270 return err;
c7078de1
IM
4271}
4272
5e2336a0 4273static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 4274 .open = tracing_open_generic_tr,
c7078de1
IM
4275 .read = tracing_cpumask_read,
4276 .write = tracing_cpumask_write,
ccfe9e42 4277 .release = tracing_release_generic_tr,
b444786f 4278 .llseek = generic_file_llseek,
bc0c38d1
SR
4279};
4280
fdb372ed 4281static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 4282{
d8e83d26 4283 struct tracer_opt *trace_opts;
2b6080f2 4284 struct trace_array *tr = m->private;
d8e83d26 4285 u32 tracer_flags;
d8e83d26 4286 int i;
adf9f195 4287
d8e83d26 4288 mutex_lock(&trace_types_lock);
2b6080f2
SR
4289 tracer_flags = tr->current_trace->flags->val;
4290 trace_opts = tr->current_trace->flags->opts;
d8e83d26 4291
bc0c38d1 4292 for (i = 0; trace_options[i]; i++) {
983f938a 4293 if (tr->trace_flags & (1 << i))
fdb372ed 4294 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 4295 else
fdb372ed 4296 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
4297 }
4298
adf9f195
FW
4299 for (i = 0; trace_opts[i].name; i++) {
4300 if (tracer_flags & trace_opts[i].bit)
fdb372ed 4301 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 4302 else
fdb372ed 4303 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 4304 }
d8e83d26 4305 mutex_unlock(&trace_types_lock);
adf9f195 4306
fdb372ed 4307 return 0;
bc0c38d1 4308}
bc0c38d1 4309
8c1a49ae 4310static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
4311 struct tracer_flags *tracer_flags,
4312 struct tracer_opt *opts, int neg)
4313{
d39cdd20 4314 struct tracer *trace = tracer_flags->trace;
8d18eaaf 4315 int ret;
bc0c38d1 4316
8c1a49ae 4317 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
4318 if (ret)
4319 return ret;
4320
4321 if (neg)
4322 tracer_flags->val &= ~opts->bit;
4323 else
4324 tracer_flags->val |= opts->bit;
4325 return 0;
bc0c38d1
SR
4326}
4327
adf9f195 4328/* Try to assign a tracer specific option */
8c1a49ae 4329static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 4330{
8c1a49ae 4331 struct tracer *trace = tr->current_trace;
7770841e 4332 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 4333 struct tracer_opt *opts = NULL;
8d18eaaf 4334 int i;
adf9f195 4335
7770841e
Z
4336 for (i = 0; tracer_flags->opts[i].name; i++) {
4337 opts = &tracer_flags->opts[i];
adf9f195 4338
8d18eaaf 4339 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 4340 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 4341 }
adf9f195 4342
8d18eaaf 4343 return -EINVAL;
adf9f195
FW
4344}
4345
613f04a0
SRRH
4346/* Some tracers require overwrite to stay enabled */
4347int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4348{
4349 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4350 return -1;
4351
4352 return 0;
4353}
4354
2b6080f2 4355int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
4356{
4357 /* do nothing if flag is already set */
983f938a 4358 if (!!(tr->trace_flags & mask) == !!enabled)
613f04a0
SRRH
4359 return 0;
4360
4361 /* Give the tracer a chance to approve the change */
2b6080f2 4362 if (tr->current_trace->flag_changed)
bf6065b5 4363 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 4364 return -EINVAL;
af4617bd
SR
4365
4366 if (enabled)
983f938a 4367 tr->trace_flags |= mask;
af4617bd 4368 else
983f938a 4369 tr->trace_flags &= ~mask;
e870e9a1
LZ
4370
4371 if (mask == TRACE_ITER_RECORD_CMD)
4372 trace_event_enable_cmd_record(enabled);
750912fa 4373
d914ba37
JF
4374 if (mask == TRACE_ITER_RECORD_TGID) {
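	/* the pid-to-tgid map is allocated lazily, the first time this option is set */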
4375 if (!tgid_map)
4376 tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map),
4377 GFP_KERNEL);
4378 if (!tgid_map) {
4379 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4380 return -ENOMEM;
4381 }
4382
4383 trace_event_enable_tgid_record(enabled);
4384 }
4385
c37775d5
SR
4386 if (mask == TRACE_ITER_EVENT_FORK)
4387 trace_event_follow_fork(tr, enabled);
4388
1e10486f
NK
4389 if (mask == TRACE_ITER_FUNC_FORK)
4390 ftrace_pid_follow_fork(tr, enabled);
4391
80902822 4392 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 4393 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 4394#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 4395 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
4396#endif
4397 }
81698831 4398
b9f9108c 4399 if (mask == TRACE_ITER_PRINTK) {
81698831 4400 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
4401 trace_printk_control(enabled);
4402 }
613f04a0
SRRH
4403
4404 return 0;
af4617bd
SR
4405}
4406
2b6080f2 4407static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 4408{
8d18eaaf 4409 char *cmp;
bc0c38d1 4410 int neg = 0;
613f04a0 4411 int ret = -ENODEV;
bc0c38d1 4412 int i;
a4d1e688 4413 size_t orig_len = strlen(option);
bc0c38d1 4414
7bcfaf54 4415 cmp = strstrip(option);
bc0c38d1 4416
8d18eaaf 4417 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
4418 neg = 1;
4419 cmp += 2;
4420 }
4421
69d34da2
SRRH
4422 mutex_lock(&trace_types_lock);
4423
bc0c38d1 4424 for (i = 0; trace_options[i]; i++) {
8d18eaaf 4425 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 4426 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
4427 break;
4428 }
4429 }
adf9f195
FW
4430
4431 /* If no option could be set, test the specific tracer options */
69d34da2 4432 if (!trace_options[i])
8c1a49ae 4433 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
4434
4435 mutex_unlock(&trace_types_lock);
bc0c38d1 4436
a4d1e688
JW
4437 /*
4438 * If the first trailing whitespace is replaced with '\0' by strstrip,
4439 * turn it back into a space.
4440 */
4441 if (orig_len > strlen(option))
4442 option[strlen(option)] = ' ';
4443
7bcfaf54
SR
4444 return ret;
4445}
4446
a4d1e688
JW
4447static void __init apply_trace_boot_options(void)
4448{
4449 char *buf = trace_boot_options_buf;
4450 char *option;
4451
4452 while (true) {
4453 option = strsep(&buf, ",");
4454
4455 if (!option)
4456 break;
a4d1e688 4457
43ed3843
SRRH
4458 if (*option)
4459 trace_set_options(&global_trace, option);
a4d1e688
JW
4460
4461 /* Put back the comma to allow this to be called again */
4462 if (buf)
4463 *(buf - 1) = ',';
4464 }
4465}
4466
7bcfaf54
SR
4467static ssize_t
4468tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4469 size_t cnt, loff_t *ppos)
4470{
2b6080f2
SR
4471 struct seq_file *m = filp->private_data;
4472 struct trace_array *tr = m->private;
7bcfaf54 4473 char buf[64];
613f04a0 4474 int ret;
7bcfaf54
SR
4475
4476 if (cnt >= sizeof(buf))
4477 return -EINVAL;
4478
4afe6495 4479 if (copy_from_user(buf, ubuf, cnt))
7bcfaf54
SR
4480 return -EFAULT;
4481
a8dd2176
SR
4482 buf[cnt] = 0;
4483
2b6080f2 4484 ret = trace_set_options(tr, buf);
613f04a0
SRRH
4485 if (ret < 0)
4486 return ret;
7bcfaf54 4487
cf8517cf 4488 *ppos += cnt;
bc0c38d1
SR
4489
4490 return cnt;
4491}
4492
fdb372ed
LZ
4493static int tracing_trace_options_open(struct inode *inode, struct file *file)
4494{
7b85af63 4495 struct trace_array *tr = inode->i_private;
f77d09a3 4496 int ret;
7b85af63 4497
fdb372ed
LZ
4498 if (tracing_disabled)
4499 return -ENODEV;
2b6080f2 4500
7b85af63
SRRH
4501 if (trace_array_get(tr) < 0)
4502 return -ENODEV;
4503
f77d09a3
AL
4504 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4505 if (ret < 0)
4506 trace_array_put(tr);
4507
4508 return ret;
fdb372ed
LZ
4509}
4510
5e2336a0 4511static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
4512 .open = tracing_trace_options_open,
4513 .read = seq_read,
4514 .llseek = seq_lseek,
7b85af63 4515 .release = tracing_single_release_tr,
ee6bce52 4516 .write = tracing_trace_options_write,
bc0c38d1
SR
4517};
4518
7bd2f24c
IM
4519static const char readme_msg[] =
4520 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
4521 "# echo 0 > tracing_on : quick way to disable tracing\n"
4522 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4523 " Important files:\n"
4524 " trace\t\t\t- The static contents of the buffer\n"
4525 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4526 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4527 " current_tracer\t- function and latency tracers\n"
4528 " available_tracers\t- list of configured tracers for current_tracer\n"
4529 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4530 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4531 " trace_clock\t\t-change the clock used to order events\n"
4532 " local: Per cpu clock but may not be synced across CPUs\n"
4533 " global: Synced across CPUs but slows tracing down.\n"
4534 " counter: Not a clock, but just an increment\n"
4535 " uptime: Jiffy counter from time of boot\n"
4536 " perf: Same clock that perf events use\n"
4537#ifdef CONFIG_X86_64
4538 " x86-tsc: TSC cycle counter\n"
4539#endif
4540 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
fa32e855 4541 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
22f45649
SRRH
4542 " tracing_cpumask\t- Limit which CPUs to trace\n"
4543 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4544 "\t\t\t Remove sub-buffer with rmdir\n"
4545 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
4546 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4547 "\t\t\t option name\n"
939c7a4f 4548 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
4549#ifdef CONFIG_DYNAMIC_FTRACE
4550 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
4551 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4552 "\t\t\t functions\n"
60f1d5e3 4553 "\t accepts: func_full_name or glob-matching-pattern\n"
71485c45
SRRH
4554 "\t modules: Can select a group via module\n"
4555 "\t Format: :mod:<module-name>\n"
4556 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4557 "\t triggers: a command to perform when function is hit\n"
4558 "\t Format: <function>:<trigger>[:count]\n"
4559 "\t trigger: traceon, traceoff\n"
4560 "\t\t enable_event:<system>:<event>\n"
4561 "\t\t disable_event:<system>:<event>\n"
22f45649 4562#ifdef CONFIG_STACKTRACE
71485c45 4563 "\t\t stacktrace\n"
22f45649
SRRH
4564#endif
4565#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4566 "\t\t snapshot\n"
22f45649 4567#endif
17a280ea
SRRH
4568 "\t\t dump\n"
4569 "\t\t cpudump\n"
71485c45
SRRH
4570 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4571 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4572 "\t The first one will disable tracing every time do_fault is hit\n"
4573 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4574 "\t The first time do trap is hit and it disables tracing, the\n"
4575 "\t counter will decrement to 2. If tracing is already disabled,\n"
4576 "\t the counter will not decrement. It only decrements when the\n"
4577 "\t trigger did work\n"
4578 "\t To remove trigger without count:\n"
4579 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4580 "\t To remove trigger with a count:\n"
4581 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 4582 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
4583 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4584 "\t modules: Can select a group via module command :mod:\n"
4585 "\t Does not accept triggers\n"
22f45649
SRRH
4586#endif /* CONFIG_DYNAMIC_FTRACE */
4587#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
4588 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4589 "\t\t (function)\n"
22f45649
SRRH
4590#endif
4591#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4592 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 4593 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
4594 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4595#endif
4596#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
4597 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4598 "\t\t\t snapshot buffer. Read the contents for more\n"
4599 "\t\t\t information\n"
22f45649 4600#endif
991821c8 4601#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
4602 " stack_trace\t\t- Shows the max stack trace when active\n"
4603 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
4604 "\t\t\t Write into this file to reset the max size (trigger a\n"
4605 "\t\t\t new trace)\n"
22f45649 4606#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
4607 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4608 "\t\t\t traces\n"
22f45649 4609#endif
991821c8 4610#endif /* CONFIG_STACK_TRACER */
6b0b7551 4611#ifdef CONFIG_KPROBE_EVENTS
86425625
MH
4612 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4613 "\t\t\t Write into this file to define/undefine new trace events.\n"
4614#endif
6b0b7551 4615#ifdef CONFIG_UPROBE_EVENTS
86425625
MH
4616 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4617 "\t\t\t Write into this file to define/undefine new trace events.\n"
4618#endif
6b0b7551 4619#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
86425625 4620 "\t accepts: event-definitions (one definition per line)\n"
c3ca46ef
MH
4621 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4622 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
86425625 4623 "\t -:[<group>/]<event>\n"
6b0b7551 4624#ifdef CONFIG_KPROBE_EVENTS
86425625 4625 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
35b6f55a 4626 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
86425625 4627#endif
6b0b7551 4628#ifdef CONFIG_UPROBE_EVENTS
86425625
MH
4629 "\t place: <path>:<offset>\n"
4630#endif
4631 "\t args: <name>=fetcharg[:type]\n"
4632 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4633 "\t $stack<index>, $stack, $retval, $comm\n"
4634 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4635 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4636#endif
26f25564
TZ
4637 " events/\t\t- Directory containing all trace event subsystems:\n"
4638 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4639 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
4640 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4641 "\t\t\t events\n"
26f25564 4642 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
4643 " events/<system>/<event>/\t- Directory containing control files for\n"
4644 "\t\t\t <event>:\n"
26f25564
TZ
4645 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4646 " filter\t\t- If set, only events passing filter are traced\n"
4647 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
4648 "\t Format: <trigger>[:count][if <filter>]\n"
4649 "\t trigger: traceon, traceoff\n"
4650 "\t enable_event:<system>:<event>\n"
4651 "\t disable_event:<system>:<event>\n"
d0bad49b
TZ
4652#ifdef CONFIG_HIST_TRIGGERS
4653 "\t enable_hist:<system>:<event>\n"
4654 "\t disable_hist:<system>:<event>\n"
4655#endif
26f25564 4656#ifdef CONFIG_STACKTRACE
71485c45 4657 "\t\t stacktrace\n"
26f25564
TZ
4658#endif
4659#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4660 "\t\t snapshot\n"
7ef224d1
TZ
4661#endif
4662#ifdef CONFIG_HIST_TRIGGERS
4663 "\t\t hist (see below)\n"
26f25564 4664#endif
71485c45
SRRH
4665 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4666 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4667 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4668 "\t events/block/block_unplug/trigger\n"
4669 "\t The first disables tracing every time block_unplug is hit.\n"
4670 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4671 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4672 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4673 "\t Like function triggers, the counter is only decremented if it\n"
4674 "\t enabled or disabled tracing.\n"
4675 "\t To remove a trigger without a count:\n"
4676 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4677 "\t To remove a trigger with a count:\n"
4678 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4679 "\t Filters can be ignored when removing a trigger.\n"
7ef224d1
TZ
4680#ifdef CONFIG_HIST_TRIGGERS
4681 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
76a3b0c8 4682 "\t Format: hist:keys=<field1[,field2,...]>\n"
f2606835 4683 "\t [:values=<field1[,field2,...]>]\n"
e62347d2 4684 "\t [:sort=<field1[,field2,...]>]\n"
7ef224d1 4685 "\t [:size=#entries]\n"
e86ae9ba 4686 "\t [:pause][:continue][:clear]\n"
5463bfda 4687 "\t [:name=histname1]\n"
7ef224d1
TZ
4688 "\t [if <filter>]\n\n"
4689 "\t When a matching event is hit, an entry is added to a hash\n"
f2606835
TZ
4690 "\t table using the key(s) and value(s) named, and the value of a\n"
4691 "\t sum called 'hitcount' is incremented. Keys and values\n"
4692 "\t correspond to fields in the event's format description. Keys\n"
69a0200c
TZ
4693 "\t can be any field, or the special string 'stacktrace'.\n"
4694 "\t Compound keys consisting of up to two fields can be specified\n"
4695 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4696 "\t fields. Sort keys consisting of up to two fields can be\n"
4697 "\t specified using the 'sort' keyword. The sort direction can\n"
4698 "\t be modified by appending '.descending' or '.ascending' to a\n"
4699 "\t sort field. The 'size' parameter can be used to specify more\n"
5463bfda
TZ
4700 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4701 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4702 "\t its histogram data will be shared with other triggers of the\n"
4703 "\t same name, and trigger hits will update this common data.\n\n"
7ef224d1 4704 "\t Reading the 'hist' file for the event will dump the hash\n"
52a7f16d
TZ
4705 "\t table in its entirety to stdout. If there are multiple hist\n"
4706 "\t triggers attached to an event, there will be a table for each\n"
5463bfda
TZ
4707 "\t trigger in the output. The table displayed for a named\n"
4708 "\t trigger will be the same as any other instance having the\n"
4709 "\t same name. The default format used to display a given field\n"
4710 "\t can be modified by appending any of the following modifiers\n"
4711 "\t to the field name, as applicable:\n\n"
c6afad49
TZ
4712 "\t .hex display a number as a hex value\n"
4713 "\t .sym display an address as a symbol\n"
6b4827ad 4714 "\t .sym-offset display an address as a symbol and offset\n"
31696198
TZ
4715 "\t .execname display a common_pid as a program name\n"
4716 "\t .syscall display a syscall id as a syscall name\n\n"
4b94f5b7 4717 "\t .log2 display log2 value rather than raw number\n\n"
83e99914
TZ
4718 "\t The 'pause' parameter can be used to pause an existing hist\n"
4719 "\t trigger or to start a hist trigger but not log any events\n"
4720 "\t until told to do so. 'continue' can be used to start or\n"
4721 "\t restart a paused hist trigger.\n\n"
e86ae9ba
TZ
4722 "\t The 'clear' parameter will clear the contents of a running\n"
4723 "\t hist trigger and leave its current paused/active state\n"
4724 "\t unchanged.\n\n"
d0bad49b
TZ
4725 "\t The enable_hist and disable_hist triggers can be used to\n"
4726 "\t have one event conditionally start and stop another event's\n"
4727 "\t already-attached hist trigger. The syntax is analagous to\n"
4728 "\t the enable_event and disable_event triggers.\n"
7ef224d1 4729#endif
7bd2f24c
IM
4730;
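/*
 * Illustrative example (not part of this file): with CONFIG_HIST_TRIGGERS,
 * a hist trigger of the form described above might be attached and read
 * back from user space roughly like this:
 *
 *   echo 'hist:keys=call_site:values=bytes_req:sort=bytes_req.descending' \
 *           > events/kmem/kmalloc/trigger
 *   cat events/kmem/kmalloc/hist
 */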
4731
4732static ssize_t
4733tracing_readme_read(struct file *filp, char __user *ubuf,
4734 size_t cnt, loff_t *ppos)
4735{
4736 return simple_read_from_buffer(ubuf, cnt, ppos,
4737 readme_msg, strlen(readme_msg));
4738}
4739
5e2336a0 4740static const struct file_operations tracing_readme_fops = {
c7078de1
IM
4741 .open = tracing_open_generic,
4742 .read = tracing_readme_read,
b444786f 4743 .llseek = generic_file_llseek,
7bd2f24c
IM
4744};
4745
99c621d7
MS
4746static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4747{
4748 int *ptr = v;
4749
4750 if (*pos || m->count)
4751 ptr++;
4752
4753 (*pos)++;
4754
4755 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4756 if (trace_find_tgid(*ptr))
4757 return ptr;
4758 }
4759
4760 return NULL;
4761}
4762
4763static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4764{
4765 void *v;
4766 loff_t l = 0;
4767
4768 if (!tgid_map)
4769 return NULL;
4770
4771 v = &tgid_map[0];
4772 while (l <= *pos) {
4773 v = saved_tgids_next(m, v, &l);
4774 if (!v)
4775 return NULL;
4776 }
4777
4778 return v;
4779}
4780
4781static void saved_tgids_stop(struct seq_file *m, void *v)
4782{
4783}
4784
4785static int saved_tgids_show(struct seq_file *m, void *v)
4786{
4787 int pid = (int *)v - tgid_map;
4788
4789 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4790 return 0;
4791}
4792
4793static const struct seq_operations tracing_saved_tgids_seq_ops = {
4794 .start = saved_tgids_start,
4795 .stop = saved_tgids_stop,
4796 .next = saved_tgids_next,
4797 .show = saved_tgids_show,
4798};
4799
4800static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4801{
4802 if (tracing_disabled)
4803 return -ENODEV;
4804
4805 return seq_open(filp, &tracing_saved_tgids_seq_ops);
4806}
4807
4808
4809static const struct file_operations tracing_saved_tgids_fops = {
4810 .open = tracing_saved_tgids_open,
4811 .read = seq_read,
4812 .llseek = seq_lseek,
4813 .release = seq_release,
4814};
4815
42584c81
YY
4816static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4817{
4818 unsigned int *ptr = v;
69abe6a5 4819
42584c81
YY
4820 if (*pos || m->count)
4821 ptr++;
69abe6a5 4822
42584c81 4823 (*pos)++;
69abe6a5 4824
939c7a4f
YY
4825 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4826 ptr++) {
42584c81
YY
4827 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4828 continue;
69abe6a5 4829
42584c81
YY
4830 return ptr;
4831 }
69abe6a5 4832
42584c81
YY
4833 return NULL;
4834}
4835
4836static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4837{
4838 void *v;
4839 loff_t l = 0;
69abe6a5 4840
4c27e756
SRRH
4841 preempt_disable();
4842 arch_spin_lock(&trace_cmdline_lock);
4843
939c7a4f 4844 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
4845 while (l <= *pos) {
4846 v = saved_cmdlines_next(m, v, &l);
4847 if (!v)
4848 return NULL;
69abe6a5
AP
4849 }
4850
42584c81
YY
4851 return v;
4852}
4853
4854static void saved_cmdlines_stop(struct seq_file *m, void *v)
4855{
4c27e756
SRRH
4856 arch_spin_unlock(&trace_cmdline_lock);
4857 preempt_enable();
42584c81 4858}
69abe6a5 4859
42584c81
YY
4860static int saved_cmdlines_show(struct seq_file *m, void *v)
4861{
4862 char buf[TASK_COMM_LEN];
4863 unsigned int *pid = v;
69abe6a5 4864
4c27e756 4865 __trace_find_cmdline(*pid, buf);
42584c81
YY
4866 seq_printf(m, "%d %s\n", *pid, buf);
4867 return 0;
4868}
4869
4870static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4871 .start = saved_cmdlines_start,
4872 .next = saved_cmdlines_next,
4873 .stop = saved_cmdlines_stop,
4874 .show = saved_cmdlines_show,
4875};
4876
4877static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4878{
4879 if (tracing_disabled)
4880 return -ENODEV;
4881
4882 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
4883}
4884
4885static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
4886 .open = tracing_saved_cmdlines_open,
4887 .read = seq_read,
4888 .llseek = seq_lseek,
4889 .release = seq_release,
69abe6a5
AP
4890};
4891
939c7a4f
YY
4892static ssize_t
4893tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4894 size_t cnt, loff_t *ppos)
4895{
4896 char buf[64];
4897 int r;
4898
4899 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 4900 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
4901 arch_spin_unlock(&trace_cmdline_lock);
4902
4903 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4904}
4905
4906static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4907{
4908 kfree(s->saved_cmdlines);
4909 kfree(s->map_cmdline_to_pid);
4910 kfree(s);
4911}
4912
4913static int tracing_resize_saved_cmdlines(unsigned int val)
4914{
4915 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4916
a6af8fbf 4917 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
4918 if (!s)
4919 return -ENOMEM;
4920
4921 if (allocate_cmdlines_buffer(val, s) < 0) {
4922 kfree(s);
4923 return -ENOMEM;
4924 }
4925
4926 arch_spin_lock(&trace_cmdline_lock);
4927 savedcmd_temp = savedcmd;
4928 savedcmd = s;
4929 arch_spin_unlock(&trace_cmdline_lock);
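	/* free the old buffer outside the lock; readers only access savedcmd while holding it */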
4930 free_saved_cmdlines_buffer(savedcmd_temp);
4931
4932 return 0;
4933}
4934
4935static ssize_t
4936tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4937 size_t cnt, loff_t *ppos)
4938{
4939 unsigned long val;
4940 int ret;
4941
4942 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4943 if (ret)
4944 return ret;
4945
 4946 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
4947 if (!val || val > PID_MAX_DEFAULT)
4948 return -EINVAL;
4949
4950 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4951 if (ret < 0)
4952 return ret;
4953
4954 *ppos += cnt;
4955
4956 return cnt;
4957}
4958
4959static const struct file_operations tracing_saved_cmdlines_size_fops = {
4960 .open = tracing_open_generic,
4961 .read = tracing_saved_cmdlines_size_read,
4962 .write = tracing_saved_cmdlines_size_write,
4963};
4964
681bec03 4965#ifdef CONFIG_TRACE_EVAL_MAP_FILE
23bf8cb8 4966static union trace_eval_map_item *
f57a4143 4967update_eval_map(union trace_eval_map_item *ptr)
9828413d 4968{
00f4b652 4969 if (!ptr->map.eval_string) {
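	/* a NULL eval_string means this is a tail item; follow it to the next array */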
9828413d
SRRH
4970 if (ptr->tail.next) {
4971 ptr = ptr->tail.next;
4972 /* Set ptr to the next real item (skip head) */
4973 ptr++;
4974 } else
4975 return NULL;
4976 }
4977 return ptr;
4978}
4979
f57a4143 4980static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
9828413d 4981{
23bf8cb8 4982 union trace_eval_map_item *ptr = v;
9828413d
SRRH
4983
4984 /*
4985 * Paranoid! If ptr points to end, we don't want to increment past it.
4986 * This really should never happen.
4987 */
f57a4143 4988 ptr = update_eval_map(ptr);
9828413d
SRRH
4989 if (WARN_ON_ONCE(!ptr))
4990 return NULL;
4991
4992 ptr++;
4993
4994 (*pos)++;
4995
f57a4143 4996 ptr = update_eval_map(ptr);
9828413d
SRRH
4997
4998 return ptr;
4999}
5000
f57a4143 5001static void *eval_map_start(struct seq_file *m, loff_t *pos)
9828413d 5002{
23bf8cb8 5003 union trace_eval_map_item *v;
9828413d
SRRH
5004 loff_t l = 0;
5005
1793ed93 5006 mutex_lock(&trace_eval_mutex);
9828413d 5007
23bf8cb8 5008 v = trace_eval_maps;
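	/* step over the head item of the first array to reach the first real map */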
9828413d
SRRH
5009 if (v)
5010 v++;
5011
5012 while (v && l < *pos) {
f57a4143 5013 v = eval_map_next(m, v, &l);
9828413d
SRRH
5014 }
5015
5016 return v;
5017}
5018
f57a4143 5019static void eval_map_stop(struct seq_file *m, void *v)
9828413d 5020{
1793ed93 5021 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
5022}
5023
f57a4143 5024static int eval_map_show(struct seq_file *m, void *v)
9828413d 5025{
23bf8cb8 5026 union trace_eval_map_item *ptr = v;
9828413d
SRRH
5027
5028 seq_printf(m, "%s %ld (%s)\n",
00f4b652 5029 ptr->map.eval_string, ptr->map.eval_value,
9828413d
SRRH
5030 ptr->map.system);
5031
5032 return 0;
5033}
5034
f57a4143
JL
5035static const struct seq_operations tracing_eval_map_seq_ops = {
5036 .start = eval_map_start,
5037 .next = eval_map_next,
5038 .stop = eval_map_stop,
5039 .show = eval_map_show,
9828413d
SRRH
5040};
5041
f57a4143 5042static int tracing_eval_map_open(struct inode *inode, struct file *filp)
9828413d
SRRH
5043{
5044 if (tracing_disabled)
5045 return -ENODEV;
5046
f57a4143 5047 return seq_open(filp, &tracing_eval_map_seq_ops);
9828413d
SRRH
5048}
5049
f57a4143
JL
5050static const struct file_operations tracing_eval_map_fops = {
5051 .open = tracing_eval_map_open,
9828413d
SRRH
5052 .read = seq_read,
5053 .llseek = seq_lseek,
5054 .release = seq_release,
5055};
5056
23bf8cb8 5057static inline union trace_eval_map_item *
5f60b351 5058trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
9828413d
SRRH
5059{
5060 /* Return tail of array given the head */
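	/* layout is [head][length map items][tail], so the tail sits head.length + 1 entries ahead */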
5061 return ptr + ptr->head.length + 1;
5062}
5063
5064static void
f57a4143 5065trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
9828413d
SRRH
5066 int len)
5067{
00f4b652
JL
5068 struct trace_eval_map **stop;
5069 struct trace_eval_map **map;
23bf8cb8
JL
5070 union trace_eval_map_item *map_array;
5071 union trace_eval_map_item *ptr;
9828413d
SRRH
5072
5073 stop = start + len;
5074
5075 /*
23bf8cb8 5076 * The trace_eval_maps contains the map plus a head and tail item,
9828413d
SRRH
5077 * where the head holds the module and length of array, and the
5078 * tail holds a pointer to the next list.
5079 */
5080 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
5081 if (!map_array) {
f57a4143 5082 pr_warn("Unable to allocate trace eval mapping\n");
9828413d
SRRH
5083 return;
5084 }
5085
1793ed93 5086 mutex_lock(&trace_eval_mutex);
9828413d 5087
23bf8cb8
JL
5088 if (!trace_eval_maps)
5089 trace_eval_maps = map_array;
9828413d 5090 else {
23bf8cb8 5091 ptr = trace_eval_maps;
9828413d 5092 for (;;) {
5f60b351 5093 ptr = trace_eval_jmp_to_tail(ptr);
9828413d
SRRH
5094 if (!ptr->tail.next)
5095 break;
5096 ptr = ptr->tail.next;
5097
5098 }
5099 ptr->tail.next = map_array;
5100 }
5101 map_array->head.mod = mod;
5102 map_array->head.length = len;
5103 map_array++;
5104
5105 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5106 map_array->map = **map;
5107 map_array++;
5108 }
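	/* zero the final slot: it becomes the tail item with tail.next == NULL */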
5109 memset(map_array, 0, sizeof(*map_array));
5110
1793ed93 5111 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
5112}
5113
f57a4143 5114static void trace_create_eval_file(struct dentry *d_tracer)
9828413d 5115{
681bec03 5116 trace_create_file("eval_map", 0444, d_tracer,
f57a4143 5117 NULL, &tracing_eval_map_fops);
9828413d
SRRH
5118}
5119
681bec03 5120#else /* CONFIG_TRACE_EVAL_MAP_FILE */
f57a4143
JL
5121static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5122static inline void trace_insert_eval_map_file(struct module *mod,
00f4b652 5123 struct trace_eval_map **start, int len) { }
681bec03 5124#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 5125
f57a4143 5126static void trace_insert_eval_map(struct module *mod,
00f4b652 5127 struct trace_eval_map **start, int len)
0c564a53 5128{
00f4b652 5129 struct trace_eval_map **map;
0c564a53
SRRH
5130
5131 if (len <= 0)
5132 return;
5133
5134 map = start;
5135
f57a4143 5136 trace_event_eval_update(map, len);
9828413d 5137
f57a4143 5138 trace_insert_eval_map_file(mod, start, len);
0c564a53
SRRH
5139}
5140
bc0c38d1
SR
5141static ssize_t
5142tracing_set_trace_read(struct file *filp, char __user *ubuf,
5143 size_t cnt, loff_t *ppos)
5144{
2b6080f2 5145 struct trace_array *tr = filp->private_data;
ee6c2c1b 5146 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
5147 int r;
5148
5149 mutex_lock(&trace_types_lock);
2b6080f2 5150 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
5151 mutex_unlock(&trace_types_lock);
5152
4bf39a94 5153 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
5154}
5155
b6f11df2
ACM
5156int tracer_init(struct tracer *t, struct trace_array *tr)
5157{
12883efb 5158 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
5159 return t->init(tr);
5160}
5161
12883efb 5162static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
5163{
5164 int cpu;
737223fb 5165
438ced17 5166 for_each_tracing_cpu(cpu)
12883efb 5167 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
5168}
5169
12883efb 5170#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 5171/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
5172static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5173 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
5174{
5175 int cpu, ret = 0;
5176
5177 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5178 for_each_tracing_cpu(cpu) {
12883efb
SRRH
5179 ret = ring_buffer_resize(trace_buf->buffer,
5180 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
5181 if (ret < 0)
5182 break;
12883efb
SRRH
5183 per_cpu_ptr(trace_buf->data, cpu)->entries =
5184 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
5185 }
5186 } else {
12883efb
SRRH
5187 ret = ring_buffer_resize(trace_buf->buffer,
5188 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 5189 if (ret == 0)
12883efb
SRRH
5190 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5191 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
5192 }
5193
5194 return ret;
5195}
12883efb 5196#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 5197
2b6080f2
SR
5198static int __tracing_resize_ring_buffer(struct trace_array *tr,
5199 unsigned long size, int cpu)
73c5162a
SR
5200{
5201 int ret;
5202
5203 /*
5204 * If kernel or user changes the size of the ring buffer
a123c52b
SR
5205 * we use the size that was given, and we can forget about
5206 * expanding it later.
73c5162a 5207 */
55034cd6 5208 ring_buffer_expanded = true;
73c5162a 5209
b382ede6 5210 /* May be called before buffers are initialized */
12883efb 5211 if (!tr->trace_buffer.buffer)
b382ede6
SR
5212 return 0;
5213
12883efb 5214 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
5215 if (ret < 0)
5216 return ret;
5217
12883efb 5218#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
5219 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5220 !tr->current_trace->use_max_tr)
ef710e10
KM
5221 goto out;
5222
12883efb 5223 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 5224 if (ret < 0) {
12883efb
SRRH
5225 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5226 &tr->trace_buffer, cpu);
73c5162a 5227 if (r < 0) {
a123c52b
SR
5228 /*
5229 * AARGH! We are left with different
5230 * size max buffer!!!!
5231 * The max buffer is our "snapshot" buffer.
5232 * When a tracer needs a snapshot (one of the
5233 * latency tracers), it swaps the max buffer
5234 * with the saved snap shot. We succeeded to
5235 * update the size of the main buffer, but failed to
5236 * update the size of the max buffer. But when we tried
5237 * to reset the main buffer to the original size, we
5238 * failed there too. This is very unlikely to
5239 * happen, but if it does, warn and kill all
5240 * tracing.
5241 */
73c5162a
SR
5242 WARN_ON(1);
5243 tracing_disabled = 1;
5244 }
5245 return ret;
5246 }
5247
438ced17 5248 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5249 set_buffer_entries(&tr->max_buffer, size);
438ced17 5250 else
12883efb 5251 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 5252
ef710e10 5253 out:
12883efb
SRRH
5254#endif /* CONFIG_TRACER_MAX_TRACE */
5255
438ced17 5256 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5257 set_buffer_entries(&tr->trace_buffer, size);
438ced17 5258 else
12883efb 5259 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
5260
5261 return ret;
5262}
5263
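/*
 * Wrapper around __tracing_resize_ring_buffer() that takes
 * trace_types_lock and validates @cpu_id against tracing_buffer_mask.
 */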
2b6080f2
SR
5264static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5265 unsigned long size, int cpu_id)
4f271a2a 5266{
83f40318 5267 int ret = size;
4f271a2a
VN
5268
5269 mutex_lock(&trace_types_lock);
5270
438ced17
VN
5271 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5272 /* make sure this cpu is enabled in the mask */
5273 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5274 ret = -EINVAL;
5275 goto out;
5276 }
5277 }
4f271a2a 5278
2b6080f2 5279 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
5280 if (ret < 0)
5281 ret = -ENOMEM;
5282
438ced17 5283out:
4f271a2a
VN
5284 mutex_unlock(&trace_types_lock);
5285
5286 return ret;
5287}
5288
ef710e10 5289
1852fcce
SR
5290/**
5291 * tracing_update_buffers - used by tracing facility to expand ring buffers
5292 *
5293 * To save memory when tracing is never used on a system that has it
5294 * configured in, the ring buffers are set to a minimum size. Once
5295 * a user starts to use the tracing facility, they need to grow
5296 * to their default size.
5297 *
5298 * This function is to be called when a tracer is about to be used.
5299 */
5300int tracing_update_buffers(void)
5301{
5302 int ret = 0;
5303
1027fcb2 5304 mutex_lock(&trace_types_lock);
1852fcce 5305 if (!ring_buffer_expanded)
2b6080f2 5306 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 5307 RING_BUFFER_ALL_CPUS);
1027fcb2 5308 mutex_unlock(&trace_types_lock);
1852fcce
SR
5309
5310 return ret;
5311}
5312
577b785f
SR
5313struct trace_option_dentry;
5314
37aea98b 5315static void
2b6080f2 5316create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 5317
6b450d25
SRRH
5318/*
5319 * Used to clear out the tracer before deletion of an instance.
5320 * Must have trace_types_lock held.
5321 */
5322static void tracing_set_nop(struct trace_array *tr)
5323{
5324 if (tr->current_trace == &nop_trace)
5325 return;
5326
50512ab5 5327 tr->current_trace->enabled--;
6b450d25
SRRH
5328
5329 if (tr->current_trace->reset)
5330 tr->current_trace->reset(tr);
5331
5332 tr->current_trace = &nop_trace;
5333}
5334
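/* Create the option files for tracer @t once the tracing directory for @tr exists. */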
41d9c0be 5335static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 5336{
09d23a1d
SRRH
5337 /* Only enable if the directory has been created already. */
5338 if (!tr->dir)
5339 return;
5340
37aea98b 5341 create_trace_option_files(tr, t);
09d23a1d
SRRH
5342}
5343
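/*
 * Switch @tr's current tracer to the one named @buf: expand the ring
 * buffer if it has not been expanded yet, tear down the old tracer,
 * free or allocate the snapshot buffer as the new tracer requires,
 * and then initialize the new tracer.
 */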
5344static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5345{
bc0c38d1 5346 struct tracer *t;
12883efb 5347#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5348 bool had_max_tr;
12883efb 5349#endif
d9e54076 5350 int ret = 0;
bc0c38d1 5351
1027fcb2
SR
5352 mutex_lock(&trace_types_lock);
5353
73c5162a 5354 if (!ring_buffer_expanded) {
2b6080f2 5355 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 5356 RING_BUFFER_ALL_CPUS);
73c5162a 5357 if (ret < 0)
59f586db 5358 goto out;
73c5162a
SR
5359 ret = 0;
5360 }
5361
bc0c38d1
SR
5362 for (t = trace_types; t; t = t->next) {
5363 if (strcmp(t->name, buf) == 0)
5364 break;
5365 }
c2931e05
FW
5366 if (!t) {
5367 ret = -EINVAL;
5368 goto out;
5369 }
2b6080f2 5370 if (t == tr->current_trace)
bc0c38d1
SR
5371 goto out;
5372
c7b3ae0b
ZSZ
5373 /* Some tracers won't work on kernel command line */
5374 if (system_state < SYSTEM_RUNNING && t->noboot) {
5375 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5376 t->name);
5377 goto out;
5378 }
5379
607e2ea1
SRRH
5380 /* Some tracers are only allowed for the top level buffer */
5381 if (!trace_ok_for_array(t, tr)) {
5382 ret = -EINVAL;
5383 goto out;
5384 }
5385
cf6ab6d9
SRRH
5386 /* If trace pipe files are being read, we can't change the tracer */
5387 if (tr->current_trace->ref) {
5388 ret = -EBUSY;
5389 goto out;
5390 }
5391
9f029e83 5392 trace_branch_disable();
613f04a0 5393
50512ab5 5394 tr->current_trace->enabled--;
613f04a0 5395
2b6080f2
SR
5396 if (tr->current_trace->reset)
5397 tr->current_trace->reset(tr);
34600f0e 5398
12883efb 5399 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 5400 tr->current_trace = &nop_trace;
34600f0e 5401
45ad21ca
SRRH
5402#ifdef CONFIG_TRACER_MAX_TRACE
5403 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
5404
5405 if (had_max_tr && !t->use_max_tr) {
5406 /*
5407 * We need to make sure that the update_max_tr sees that
5408 * current_trace changed to nop_trace to keep it from
5409 * swapping the buffers after we resize it.
5410 * The update_max_tr is called with interrupts disabled,
5411 * so a synchronize_sched() is sufficient.
5412 */
5413 synchronize_sched();
3209cff4 5414 free_snapshot(tr);
ef710e10 5415 }
12883efb 5416#endif
12883efb
SRRH
5417
5418#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5419 if (t->use_max_tr && !had_max_tr) {
9ccd9a81 5420 ret = tracing_alloc_snapshot_instance(tr);
d60da506
HT
5421 if (ret < 0)
5422 goto out;
ef710e10 5423 }
12883efb 5424#endif
577b785f 5425
1c80025a 5426 if (t->init) {
b6f11df2 5427 ret = tracer_init(t, tr);
1c80025a
FW
5428 if (ret)
5429 goto out;
5430 }
bc0c38d1 5431
2b6080f2 5432 tr->current_trace = t;
50512ab5 5433 tr->current_trace->enabled++;
9f029e83 5434 trace_branch_enable(tr);
bc0c38d1
SR
5435 out:
5436 mutex_unlock(&trace_types_lock);
5437
d9e54076
PZ
5438 return ret;
5439}
5440
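/*
 * Write handler for the current_tracer file: copy the tracer name from
 * user space, strip trailing whitespace and hand it to tracing_set_tracer().
 */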
5441static ssize_t
5442tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5443 size_t cnt, loff_t *ppos)
5444{
607e2ea1 5445 struct trace_array *tr = filp->private_data;
ee6c2c1b 5446 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
5447 int i;
5448 size_t ret;
e6e7a65a
FW
5449 int err;
5450
5451 ret = cnt;
d9e54076 5452
ee6c2c1b
LZ
5453 if (cnt > MAX_TRACER_SIZE)
5454 cnt = MAX_TRACER_SIZE;
d9e54076 5455
4afe6495 5456 if (copy_from_user(buf, ubuf, cnt))
d9e54076
PZ
5457 return -EFAULT;
5458
5459 buf[cnt] = 0;
5460
5461 /* strip trailing whitespace. */
5462 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5463 buf[i] = 0;
5464
607e2ea1 5465 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
5466 if (err)
5467 return err;
d9e54076 5468
cf8517cf 5469 *ppos += ret;
bc0c38d1 5470
c2931e05 5471 return ret;
bc0c38d1
SR
5472}
5473
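/*
 * Helpers shared by the tracing_thresh and max-latency files: the value
 * is kept in nanoseconds internally but read and written in microseconds.
 */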
5474static ssize_t
6508fa76
SF
5475tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5476 size_t cnt, loff_t *ppos)
bc0c38d1 5477{
bc0c38d1
SR
5478 char buf[64];
5479 int r;
5480
cffae437 5481 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 5482 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
5483 if (r > sizeof(buf))
5484 r = sizeof(buf);
4bf39a94 5485 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
5486}
5487
5488static ssize_t
6508fa76
SF
5489tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5490 size_t cnt, loff_t *ppos)
bc0c38d1 5491{
5e39841c 5492 unsigned long val;
c6caeeb1 5493 int ret;
bc0c38d1 5494
22fe9b54
PH
5495 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5496 if (ret)
c6caeeb1 5497 return ret;
bc0c38d1
SR
5498
5499 *ptr = val * 1000;
5500
5501 return cnt;
5502}
5503
6508fa76
SF
5504static ssize_t
5505tracing_thresh_read(struct file *filp, char __user *ubuf,
5506 size_t cnt, loff_t *ppos)
5507{
5508 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5509}
5510
5511static ssize_t
5512tracing_thresh_write(struct file *filp, const char __user *ubuf,
5513 size_t cnt, loff_t *ppos)
5514{
5515 struct trace_array *tr = filp->private_data;
5516 int ret;
5517
5518 mutex_lock(&trace_types_lock);
5519 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5520 if (ret < 0)
5521 goto out;
5522
5523 if (tr->current_trace->update_thresh) {
5524 ret = tr->current_trace->update_thresh(tr);
5525 if (ret < 0)
5526 goto out;
5527 }
5528
5529 ret = cnt;
5530out:
5531 mutex_unlock(&trace_types_lock);
5532
5533 return ret;
5534}
5535
f971cc9a 5536#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
e428abbb 5537
6508fa76
SF
5538static ssize_t
5539tracing_max_lat_read(struct file *filp, char __user *ubuf,
5540 size_t cnt, loff_t *ppos)
5541{
5542 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5543}
5544
5545static ssize_t
5546tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5547 size_t cnt, loff_t *ppos)
5548{
5549 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5550}
5551
e428abbb
CG
5552#endif
5553
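/*
 * Open a consuming reader on trace_pipe: allocate the iterator and its
 * cpumask, and take references on the trace array and the current tracer
 * so neither can go away while the pipe is open.
 */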
b3806b43
SR
5554static int tracing_open_pipe(struct inode *inode, struct file *filp)
5555{
15544209 5556 struct trace_array *tr = inode->i_private;
b3806b43 5557 struct trace_iterator *iter;
b04cc6b1 5558 int ret = 0;
b3806b43
SR
5559
5560 if (tracing_disabled)
5561 return -ENODEV;
5562
7b85af63
SRRH
5563 if (trace_array_get(tr) < 0)
5564 return -ENODEV;
5565
b04cc6b1
FW
5566 mutex_lock(&trace_types_lock);
5567
b3806b43
SR
5568 /* create a buffer to store the information to pass to userspace */
5569 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
5570 if (!iter) {
5571 ret = -ENOMEM;
f77d09a3 5572 __trace_array_put(tr);
b04cc6b1
FW
5573 goto out;
5574 }
b3806b43 5575
3a161d99 5576 trace_seq_init(&iter->seq);
d716ff71 5577 iter->trace = tr->current_trace;
d7350c3f 5578
4462344e 5579 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 5580 ret = -ENOMEM;
d7350c3f 5581 goto fail;
4462344e
RR
5582 }
5583
a309720c 5584 /* trace pipe does not show start of buffer */
4462344e 5585 cpumask_setall(iter->started);
a309720c 5586
983f938a 5587 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
112f38a7
SR
5588 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5589
8be0709f 5590 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 5591 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
5592 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5593
15544209
ON
5594 iter->tr = tr;
5595 iter->trace_buffer = &tr->trace_buffer;
5596 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 5597 mutex_init(&iter->mutex);
b3806b43
SR
5598 filp->private_data = iter;
5599
107bad8b
SR
5600 if (iter->trace->pipe_open)
5601 iter->trace->pipe_open(iter);
107bad8b 5602
b444786f 5603 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
5604
5605 tr->current_trace->ref++;
b04cc6b1
FW
5606out:
5607 mutex_unlock(&trace_types_lock);
5608 return ret;
d7350c3f
FW
5609
5610fail:
5611 kfree(iter->trace);
5612 kfree(iter);
7b85af63 5613 __trace_array_put(tr);
d7350c3f
FW
5614 mutex_unlock(&trace_types_lock);
5615 return ret;
b3806b43
SR
5616}
5617
5618static int tracing_release_pipe(struct inode *inode, struct file *file)
5619{
5620 struct trace_iterator *iter = file->private_data;
15544209 5621 struct trace_array *tr = inode->i_private;
b3806b43 5622
b04cc6b1
FW
5623 mutex_lock(&trace_types_lock);
5624
cf6ab6d9
SRRH
5625 tr->current_trace->ref--;
5626
29bf4a5e 5627 if (iter->trace->pipe_close)
c521efd1
SR
5628 iter->trace->pipe_close(iter);
5629
b04cc6b1
FW
5630 mutex_unlock(&trace_types_lock);
5631
4462344e 5632 free_cpumask_var(iter->started);
d7350c3f 5633 mutex_destroy(&iter->mutex);
b3806b43 5634 kfree(iter);
b3806b43 5635
7b85af63
SRRH
5636 trace_array_put(tr);
5637
b3806b43
SR
5638 return 0;
5639}
5640
2a2cc8f7 5641static unsigned int
cc60cdc9 5642trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 5643{
983f938a
SRRH
5644 struct trace_array *tr = iter->tr;
5645
15693458
SRRH
5646 /* Iterators are static; they should be either filled or empty */
5647 if (trace_buffer_iter(iter, iter->cpu_file))
5648 return POLLIN | POLLRDNORM;
2a2cc8f7 5649
983f938a 5650 if (tr->trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
5651 /*
5652 * Always select as readable when in blocking mode
5653 */
5654 return POLLIN | POLLRDNORM;
15693458 5655 else
12883efb 5656 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 5657 filp, poll_table);
2a2cc8f7 5658}
2a2cc8f7 5659
cc60cdc9
SR
5660static unsigned int
5661tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5662{
5663 struct trace_iterator *iter = filp->private_data;
5664
5665 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
5666}
5667
d716ff71 5668/* Must be called with iter->mutex held. */
ff98781b 5669static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
5670{
5671 struct trace_iterator *iter = filp->private_data;
8b8b3683 5672 int ret;
b3806b43 5673
b3806b43 5674 while (trace_empty(iter)) {
2dc8f095 5675
107bad8b 5676 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 5677 return -EAGAIN;
107bad8b 5678 }
2dc8f095 5679
b3806b43 5680 /*
250bfd3d 5681 * We block until we have read something and tracing is disabled.
b3806b43
SR
5682 * We still block if tracing is disabled but we have never
5683 * read anything. This allows a user to cat this file, and
5684 * then enable tracing. But after we have read something,
5685 * we give an EOF when tracing is disabled again.
5686 *
5687 * iter->pos will be 0 if we haven't read anything.
5688 */
75df6e68 5689 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
b3806b43 5690 break;
f4874261
SRRH
5691
5692 mutex_unlock(&iter->mutex);
5693
e30f53aa 5694 ret = wait_on_pipe(iter, false);
f4874261
SRRH
5695
5696 mutex_lock(&iter->mutex);
5697
8b8b3683
SRRH
5698 if (ret)
5699 return ret;
b3806b43
SR
5700 }
5701
ff98781b
EGM
5702 return 1;
5703}
5704
5705/*
5706 * Consumer reader.
5707 */
5708static ssize_t
5709tracing_read_pipe(struct file *filp, char __user *ubuf,
5710 size_t cnt, loff_t *ppos)
5711{
5712 struct trace_iterator *iter = filp->private_data;
5713 ssize_t sret;
5714
d7350c3f
FW
5715 /*
5716 * Avoid more than one consumer on a single file descriptor.
5717 * This is just a matter of trace coherency; the ring buffer itself
5718 * is protected.
5719 */
5720 mutex_lock(&iter->mutex);
1245800c
SRRH
5721
5722 /* return any leftover data */
5723 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5724 if (sret != -EBUSY)
5725 goto out;
5726
5727 trace_seq_init(&iter->seq);
5728
ff98781b
EGM
5729 if (iter->trace->read) {
5730 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5731 if (sret)
5732 goto out;
5733 }
5734
5735waitagain:
5736 sret = tracing_wait_pipe(filp);
5737 if (sret <= 0)
5738 goto out;
5739
b3806b43 5740 /* stop when tracing is finished */
ff98781b
EGM
5741 if (trace_empty(iter)) {
5742 sret = 0;
107bad8b 5743 goto out;
ff98781b 5744 }
b3806b43
SR
5745
5746 if (cnt >= PAGE_SIZE)
5747 cnt = PAGE_SIZE - 1;
5748
53d0aa77 5749 /* reset all but tr, trace, and overruns */
53d0aa77
SR
5750 memset(&iter->seq, 0,
5751 sizeof(struct trace_iterator) -
5752 offsetof(struct trace_iterator, seq));
ed5467da 5753 cpumask_clear(iter->started);
4823ed7e 5754 iter->pos = -1;
b3806b43 5755
4f535968 5756 trace_event_read_lock();
7e53bd42 5757 trace_access_lock(iter->cpu_file);
955b61e5 5758 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 5759 enum print_line_t ret;
5ac48378 5760 int save_len = iter->seq.seq.len;
088b1e42 5761
f9896bf3 5762 ret = print_trace_line(iter);
2c4f035f 5763 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 5764 /* don't print partial lines */
5ac48378 5765 iter->seq.seq.len = save_len;
b3806b43 5766 break;
088b1e42 5767 }
b91facc3
FW
5768 if (ret != TRACE_TYPE_NO_CONSUME)
5769 trace_consume(iter);
b3806b43 5770
5ac48378 5771 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 5772 break;
ee5e51f5
JO
5773
5774 /*
5775 * Setting the full flag means we reached the trace_seq buffer
5776 * size and should have left via the partial output condition above.
5777 * If we get here, one of the trace_seq_* functions is not being used properly.
5778 */
5779 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5780 iter->ent->type);
b3806b43 5781 }
7e53bd42 5782 trace_access_unlock(iter->cpu_file);
4f535968 5783 trace_event_read_unlock();
b3806b43 5784
b3806b43 5785 /* Now copy what we have to the user */
6c6c2796 5786 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 5787 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 5788 trace_seq_init(&iter->seq);
9ff4b974
PP
5789
5790 /*
25985edc 5791 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
5792 * entries, go back to wait for more entries.
5793 */
6c6c2796 5794 if (sret == -EBUSY)
9ff4b974 5795 goto waitagain;
b3806b43 5796
107bad8b 5797out:
d7350c3f 5798 mutex_unlock(&iter->mutex);
107bad8b 5799
6c6c2796 5800 return sret;
b3806b43
SR
5801}
5802
3c56819b
EGM
5803static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5804 unsigned int idx)
5805{
5806 __free_page(spd->pages[idx]);
5807}
5808
28dfef8f 5809static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 5810 .can_merge = 0,
34cd4998 5811 .confirm = generic_pipe_buf_confirm,
92fdd98c 5812 .release = generic_pipe_buf_release,
34cd4998
SR
5813 .steal = generic_pipe_buf_steal,
5814 .get = generic_pipe_buf_get,
3c56819b
EGM
5815};
5816
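/*
 * Fill iter->seq with as many formatted trace lines as fit within @rem
 * bytes (and within the page-sized seq buffer), consuming entries as we
 * go. Returns the number of bytes still available.
 */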
34cd4998 5817static size_t
fa7c7f6e 5818tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
5819{
5820 size_t count;
74f06bb7 5821 int save_len;
34cd4998
SR
5822 int ret;
5823
5824 /* Seq buffer is page-sized, exactly what we need. */
5825 for (;;) {
74f06bb7 5826 save_len = iter->seq.seq.len;
34cd4998 5827 ret = print_trace_line(iter);
74f06bb7
SRRH
5828
5829 if (trace_seq_has_overflowed(&iter->seq)) {
5830 iter->seq.seq.len = save_len;
34cd4998
SR
5831 break;
5832 }
74f06bb7
SRRH
5833
5834 /*
5835 * This should not be hit, because it should only
5836 * be set if the iter->seq overflowed. But check it
5837 * anyway to be safe.
5838 */
34cd4998 5839 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
5840 iter->seq.seq.len = save_len;
5841 break;
5842 }
5843
5ac48378 5844 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
5845 if (rem < count) {
5846 rem = 0;
5847 iter->seq.seq.len = save_len;
34cd4998
SR
5848 break;
5849 }
5850
74e7ff8c
LJ
5851 if (ret != TRACE_TYPE_NO_CONSUME)
5852 trace_consume(iter);
34cd4998 5853 rem -= count;
955b61e5 5854 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
5855 rem = 0;
5856 iter->ent = NULL;
5857 break;
5858 }
5859 }
5860
5861 return rem;
5862}
5863
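/*
 * Splice implementation for trace_pipe: format entries one page at a
 * time and hand the filled pages to the pipe.
 */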
3c56819b
EGM
5864static ssize_t tracing_splice_read_pipe(struct file *filp,
5865 loff_t *ppos,
5866 struct pipe_inode_info *pipe,
5867 size_t len,
5868 unsigned int flags)
5869{
35f3d14d
JA
5870 struct page *pages_def[PIPE_DEF_BUFFERS];
5871 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
5872 struct trace_iterator *iter = filp->private_data;
5873 struct splice_pipe_desc spd = {
35f3d14d
JA
5874 .pages = pages_def,
5875 .partial = partial_def,
34cd4998 5876 .nr_pages = 0, /* This gets updated below. */
047fe360 5877 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
5878 .ops = &tracing_pipe_buf_ops,
5879 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
5880 };
5881 ssize_t ret;
34cd4998 5882 size_t rem;
3c56819b
EGM
5883 unsigned int i;
5884
35f3d14d
JA
5885 if (splice_grow_spd(pipe, &spd))
5886 return -ENOMEM;
5887
d7350c3f 5888 mutex_lock(&iter->mutex);
3c56819b
EGM
5889
5890 if (iter->trace->splice_read) {
5891 ret = iter->trace->splice_read(iter, filp,
5892 ppos, pipe, len, flags);
5893 if (ret)
34cd4998 5894 goto out_err;
3c56819b
EGM
5895 }
5896
5897 ret = tracing_wait_pipe(filp);
5898 if (ret <= 0)
34cd4998 5899 goto out_err;
3c56819b 5900
955b61e5 5901 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 5902 ret = -EFAULT;
34cd4998 5903 goto out_err;
3c56819b
EGM
5904 }
5905
4f535968 5906 trace_event_read_lock();
7e53bd42 5907 trace_access_lock(iter->cpu_file);
4f535968 5908
3c56819b 5909 /* Fill as many pages as possible. */
a786c06d 5910 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
5911 spd.pages[i] = alloc_page(GFP_KERNEL);
5912 if (!spd.pages[i])
34cd4998 5913 break;
3c56819b 5914
fa7c7f6e 5915 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
5916
5917 /* Copy the data into the page, so we can start over. */
5918 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 5919 page_address(spd.pages[i]),
5ac48378 5920 trace_seq_used(&iter->seq));
3c56819b 5921 if (ret < 0) {
35f3d14d 5922 __free_page(spd.pages[i]);
3c56819b
EGM
5923 break;
5924 }
35f3d14d 5925 spd.partial[i].offset = 0;
5ac48378 5926 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 5927
f9520750 5928 trace_seq_init(&iter->seq);
3c56819b
EGM
5929 }
5930
7e53bd42 5931 trace_access_unlock(iter->cpu_file);
4f535968 5932 trace_event_read_unlock();
d7350c3f 5933 mutex_unlock(&iter->mutex);
3c56819b
EGM
5934
5935 spd.nr_pages = i;
5936
a29054d9
SRRH
5937 if (i)
5938 ret = splice_to_pipe(pipe, &spd);
5939 else
5940 ret = 0;
35f3d14d 5941out:
047fe360 5942 splice_shrink_spd(&spd);
35f3d14d 5943 return ret;
3c56819b 5944
34cd4998 5945out_err:
d7350c3f 5946 mutex_unlock(&iter->mutex);
35f3d14d 5947 goto out;
3c56819b
EGM
5948}
5949
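/*
 * Read handler for buffer_size_kb: report the buffer size in KB, or
 * "X" when the per-cpu sizes differ and all CPUs were requested.
 */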
a98a3c3f
SR
5950static ssize_t
5951tracing_entries_read(struct file *filp, char __user *ubuf,
5952 size_t cnt, loff_t *ppos)
5953{
0bc392ee
ON
5954 struct inode *inode = file_inode(filp);
5955 struct trace_array *tr = inode->i_private;
5956 int cpu = tracing_get_cpu(inode);
438ced17
VN
5957 char buf[64];
5958 int r = 0;
5959 ssize_t ret;
a98a3c3f 5960
db526ca3 5961 mutex_lock(&trace_types_lock);
438ced17 5962
0bc392ee 5963 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
5964 int cpu, buf_size_same;
5965 unsigned long size;
5966
5967 size = 0;
5968 buf_size_same = 1;
5969 /* check if all cpu sizes are the same */
5970 for_each_tracing_cpu(cpu) {
5971 /* fill in the size from the first enabled cpu */
5972 if (size == 0)
12883efb
SRRH
5973 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5974 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
5975 buf_size_same = 0;
5976 break;
5977 }
5978 }
5979
5980 if (buf_size_same) {
5981 if (!ring_buffer_expanded)
5982 r = sprintf(buf, "%lu (expanded: %lu)\n",
5983 size >> 10,
5984 trace_buf_size >> 10);
5985 else
5986 r = sprintf(buf, "%lu\n", size >> 10);
5987 } else
5988 r = sprintf(buf, "X\n");
5989 } else
0bc392ee 5990 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 5991
db526ca3
SR
5992 mutex_unlock(&trace_types_lock);
5993
438ced17
VN
5994 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5995 return ret;
a98a3c3f
SR
5996}
5997
5998static ssize_t
5999tracing_entries_write(struct file *filp, const char __user *ubuf,
6000 size_t cnt, loff_t *ppos)
6001{
0bc392ee
ON
6002 struct inode *inode = file_inode(filp);
6003 struct trace_array *tr = inode->i_private;
a98a3c3f 6004 unsigned long val;
4f271a2a 6005 int ret;
a98a3c3f 6006
22fe9b54
PH
6007 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6008 if (ret)
c6caeeb1 6009 return ret;
a98a3c3f
SR
6010
6011 /* must have at least 1 entry */
6012 if (!val)
6013 return -EINVAL;
6014
1696b2b0
SR
6015 /* value is in KB */
6016 val <<= 10;
0bc392ee 6017 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
6018 if (ret < 0)
6019 return ret;
a98a3c3f 6020
cf8517cf 6021 *ppos += cnt;
a98a3c3f 6022
4f271a2a
VN
6023 return cnt;
6024}
bf5e6519 6025
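/*
 * Read handler for buffer_total_size_kb: sum of all per-cpu buffer sizes
 * in KB (with the would-be expanded size noted while the buffers are
 * still at their boot-time minimum).
 */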
f81ab074
VN
6026static ssize_t
6027tracing_total_entries_read(struct file *filp, char __user *ubuf,
6028 size_t cnt, loff_t *ppos)
6029{
6030 struct trace_array *tr = filp->private_data;
6031 char buf[64];
6032 int r, cpu;
6033 unsigned long size = 0, expanded_size = 0;
6034
6035 mutex_lock(&trace_types_lock);
6036 for_each_tracing_cpu(cpu) {
12883efb 6037 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
6038 if (!ring_buffer_expanded)
6039 expanded_size += trace_buf_size >> 10;
6040 }
6041 if (ring_buffer_expanded)
6042 r = sprintf(buf, "%lu\n", size);
6043 else
6044 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6045 mutex_unlock(&trace_types_lock);
6046
6047 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6048}
6049
4f271a2a
VN
6050static ssize_t
6051tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6052 size_t cnt, loff_t *ppos)
6053{
6054 /*
6055 * There is no need to read what the user has written; this function
6056 * just makes sure that there is no error when "echo" is used.
6057 */
6058
6059 *ppos += cnt;
a98a3c3f
SR
6060
6061 return cnt;
6062}
6063
4f271a2a
VN
6064static int
6065tracing_free_buffer_release(struct inode *inode, struct file *filp)
6066{
2b6080f2
SR
6067 struct trace_array *tr = inode->i_private;
6068
cf30cf67 6069 /* disable tracing? */
983f938a 6070 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 6071 tracer_tracing_off(tr);
4f271a2a 6072 /* resize the ring buffer to 0 */
2b6080f2 6073 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 6074
7b85af63
SRRH
6075 trace_array_put(tr);
6076
4f271a2a
VN
6077 return 0;
6078}
6079
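/*
 * Write handler for the trace_marker file: copy the user string straight
 * into a TRACE_PRINT event in the ring buffer, substituting "<faulted>"
 * if the copy from user space faults.
 */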
5bf9a1ee
PP
6080static ssize_t
6081tracing_mark_write(struct file *filp, const char __user *ubuf,
6082 size_t cnt, loff_t *fpos)
6083{
2d71619c 6084 struct trace_array *tr = filp->private_data;
d696b58c
SR
6085 struct ring_buffer_event *event;
6086 struct ring_buffer *buffer;
6087 struct print_entry *entry;
6088 unsigned long irq_flags;
656c7f0d 6089 const char faulted[] = "<faulted>";
d696b58c 6090 ssize_t written;
d696b58c
SR
6091 int size;
6092 int len;
fa32e855 6093
656c7f0d
SRRH
6094/* Used in tracing_mark_raw_write() as well */
6095#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
5bf9a1ee 6096
c76f0694 6097 if (tracing_disabled)
5bf9a1ee
PP
6098 return -EINVAL;
6099
983f938a 6100 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5224c3a3
MSB
6101 return -EINVAL;
6102
5bf9a1ee
PP
6103 if (cnt > TRACE_BUF_SIZE)
6104 cnt = TRACE_BUF_SIZE;
6105
d696b58c 6106 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 6107
d696b58c 6108 local_save_flags(irq_flags);
656c7f0d 6109 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
d696b58c 6110
656c7f0d
SRRH
6111 /* If less than "<faulted>", then make sure we can still add that */
6112 if (cnt < FAULTED_SIZE)
6113 size += FAULTED_SIZE - cnt;
d696b58c 6114
2d71619c 6115 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
6116 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6117 irq_flags, preempt_count());
656c7f0d 6118 if (unlikely(!event))
d696b58c 6119 /* Ring buffer disabled, return as if not open for write */
656c7f0d 6120 return -EBADF;
d696b58c
SR
6121
6122 entry = ring_buffer_event_data(event);
6123 entry->ip = _THIS_IP_;
6124
656c7f0d
SRRH
6125 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6126 if (len) {
6127 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6128 cnt = FAULTED_SIZE;
6129 written = -EFAULT;
c13d2f7c 6130 } else
656c7f0d
SRRH
6131 written = cnt;
6132 len = cnt;
5bf9a1ee 6133
d696b58c
SR
6134 if (entry->buf[cnt - 1] != '\n') {
6135 entry->buf[cnt] = '\n';
6136 entry->buf[cnt + 1] = '\0';
6137 } else
6138 entry->buf[cnt] = '\0';
6139
7ffbd48d 6140 __buffer_unlock_commit(buffer, event);
5bf9a1ee 6141
656c7f0d
SRRH
6142 if (written > 0)
6143 *fpos += written;
5bf9a1ee 6144
fa32e855
SR
6145 return written;
6146}
6147
6148/* Limit it for now to 3K (including tag) */
6149#define RAW_DATA_MAX_SIZE (1024*3)
6150
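/*
 * Like tracing_mark_write() but for the raw marker interface: the
 * payload must start with a binary tag id and is recorded as a
 * TRACE_RAW_DATA event.
 */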
6151static ssize_t
6152tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6153 size_t cnt, loff_t *fpos)
6154{
6155 struct trace_array *tr = filp->private_data;
6156 struct ring_buffer_event *event;
6157 struct ring_buffer *buffer;
6158 struct raw_data_entry *entry;
656c7f0d 6159 const char faulted[] = "<faulted>";
fa32e855 6160 unsigned long irq_flags;
fa32e855 6161 ssize_t written;
fa32e855
SR
6162 int size;
6163 int len;
6164
656c7f0d
SRRH
6165#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6166
fa32e855
SR
6167 if (tracing_disabled)
6168 return -EINVAL;
6169
6170 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6171 return -EINVAL;
6172
6173 /* The marker must at least have a tag id */
6174 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6175 return -EINVAL;
6176
6177 if (cnt > TRACE_BUF_SIZE)
6178 cnt = TRACE_BUF_SIZE;
6179
6180 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6181
fa32e855
SR
6182 local_save_flags(irq_flags);
6183 size = sizeof(*entry) + cnt;
656c7f0d
SRRH
6184 if (cnt < FAULT_SIZE_ID)
6185 size += FAULT_SIZE_ID - cnt;
6186
fa32e855 6187 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
6188 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6189 irq_flags, preempt_count());
656c7f0d 6190 if (!event)
fa32e855 6191 /* Ring buffer disabled, return as if not open for write */
656c7f0d 6192 return -EBADF;
fa32e855
SR
6193
6194 entry = ring_buffer_event_data(event);
6195
656c7f0d
SRRH
6196 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6197 if (len) {
6198 entry->id = -1;
6199 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6200 written = -EFAULT;
fa32e855 6201 } else
656c7f0d 6202 written = cnt;
fa32e855
SR
6203
6204 __buffer_unlock_commit(buffer, event);
6205
656c7f0d
SRRH
6206 if (written > 0)
6207 *fpos += written;
1aa54bca
MS
6208
6209 return written;
5bf9a1ee
PP
6210}
6211
13f16d20 6212static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 6213{
2b6080f2 6214 struct trace_array *tr = m->private;
5079f326
Z
6215 int i;
6216
6217 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 6218 seq_printf(m,
5079f326 6219 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
6220 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6221 i == tr->clock_id ? "]" : "");
13f16d20 6222 seq_putc(m, '\n');
5079f326 6223
13f16d20 6224 return 0;
5079f326
Z
6225}
6226
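/*
 * Switch to the trace clock named by @clockstr on both the main and max
 * buffers, then reset them, since timestamps taken with different clocks
 * cannot be compared.
 */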
e1e232ca 6227static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 6228{
5079f326
Z
6229 int i;
6230
5079f326
Z
6231 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6232 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6233 break;
6234 }
6235 if (i == ARRAY_SIZE(trace_clocks))
6236 return -EINVAL;
6237
5079f326
Z
6238 mutex_lock(&trace_types_lock);
6239
2b6080f2
SR
6240 tr->clock_id = i;
6241
12883efb 6242 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 6243
60303ed3
DS
6244 /*
6245 * New clock may not be consistent with the previous clock.
6246 * Reset the buffer so that it doesn't have incomparable timestamps.
6247 */
9457158b 6248 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
6249
6250#ifdef CONFIG_TRACER_MAX_TRACE
170b3b10 6251 if (tr->max_buffer.buffer)
12883efb 6252 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 6253 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 6254#endif
60303ed3 6255
5079f326
Z
6256 mutex_unlock(&trace_types_lock);
6257
e1e232ca
SR
6258 return 0;
6259}
6260
6261static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6262 size_t cnt, loff_t *fpos)
6263{
6264 struct seq_file *m = filp->private_data;
6265 struct trace_array *tr = m->private;
6266 char buf[64];
6267 const char *clockstr;
6268 int ret;
6269
6270 if (cnt >= sizeof(buf))
6271 return -EINVAL;
6272
4afe6495 6273 if (copy_from_user(buf, ubuf, cnt))
e1e232ca
SR
6274 return -EFAULT;
6275
6276 buf[cnt] = 0;
6277
6278 clockstr = strstrip(buf);
6279
6280 ret = tracing_set_clock(tr, clockstr);
6281 if (ret)
6282 return ret;
6283
5079f326
Z
6284 *fpos += cnt;
6285
6286 return cnt;
6287}
6288
13f16d20
LZ
6289static int tracing_clock_open(struct inode *inode, struct file *file)
6290{
7b85af63
SRRH
6291 struct trace_array *tr = inode->i_private;
6292 int ret;
6293
13f16d20
LZ
6294 if (tracing_disabled)
6295 return -ENODEV;
2b6080f2 6296
7b85af63
SRRH
6297 if (trace_array_get(tr))
6298 return -ENODEV;
6299
6300 ret = single_open(file, tracing_clock_show, inode->i_private);
6301 if (ret < 0)
6302 trace_array_put(tr);
6303
6304 return ret;
13f16d20
LZ
6305}
6306
6de58e62
SRRH
6307struct ftrace_buffer_info {
6308 struct trace_iterator iter;
6309 void *spare;
73a757e6 6310 unsigned int spare_cpu;
6de58e62
SRRH
6311 unsigned int read;
6312};
6313
debdd57f
HT
6314#ifdef CONFIG_TRACER_SNAPSHOT
6315static int tracing_snapshot_open(struct inode *inode, struct file *file)
6316{
6484c71c 6317 struct trace_array *tr = inode->i_private;
debdd57f 6318 struct trace_iterator *iter;
2b6080f2 6319 struct seq_file *m;
debdd57f
HT
6320 int ret = 0;
6321
ff451961
SRRH
6322 if (trace_array_get(tr) < 0)
6323 return -ENODEV;
6324
debdd57f 6325 if (file->f_mode & FMODE_READ) {
6484c71c 6326 iter = __tracing_open(inode, file, true);
debdd57f
HT
6327 if (IS_ERR(iter))
6328 ret = PTR_ERR(iter);
2b6080f2
SR
6329 } else {
6330 /* Writes still need the seq_file to hold the private data */
f77d09a3 6331 ret = -ENOMEM;
2b6080f2
SR
6332 m = kzalloc(sizeof(*m), GFP_KERNEL);
6333 if (!m)
f77d09a3 6334 goto out;
2b6080f2
SR
6335 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6336 if (!iter) {
6337 kfree(m);
f77d09a3 6338 goto out;
2b6080f2 6339 }
f77d09a3
AL
6340 ret = 0;
6341
ff451961 6342 iter->tr = tr;
6484c71c
ON
6343 iter->trace_buffer = &tr->max_buffer;
6344 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
6345 m->private = iter;
6346 file->private_data = m;
debdd57f 6347 }
f77d09a3 6348out:
ff451961
SRRH
6349 if (ret < 0)
6350 trace_array_put(tr);
6351
debdd57f
HT
6352 return ret;
6353}
6354
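/*
 * Write handler for the snapshot file: 0 frees the snapshot buffer,
 * 1 allocates it (if needed) and takes a snapshot, and any other value
 * erases the snapshot's contents.
 */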
6355static ssize_t
6356tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6357 loff_t *ppos)
6358{
2b6080f2
SR
6359 struct seq_file *m = filp->private_data;
6360 struct trace_iterator *iter = m->private;
6361 struct trace_array *tr = iter->tr;
debdd57f
HT
6362 unsigned long val;
6363 int ret;
6364
6365 ret = tracing_update_buffers();
6366 if (ret < 0)
6367 return ret;
6368
6369 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6370 if (ret)
6371 return ret;
6372
6373 mutex_lock(&trace_types_lock);
6374
2b6080f2 6375 if (tr->current_trace->use_max_tr) {
debdd57f
HT
6376 ret = -EBUSY;
6377 goto out;
6378 }
6379
6380 switch (val) {
6381 case 0:
f1affcaa
SRRH
6382 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6383 ret = -EINVAL;
6384 break;
debdd57f 6385 }
3209cff4
SRRH
6386 if (tr->allocated_snapshot)
6387 free_snapshot(tr);
debdd57f
HT
6388 break;
6389 case 1:
f1affcaa
SRRH
6390/* Only allow per-cpu swap if the ring buffer supports it */
6391#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6392 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6393 ret = -EINVAL;
6394 break;
6395 }
6396#endif
45ad21ca 6397 if (!tr->allocated_snapshot) {
9ccd9a81 6398 ret = tracing_alloc_snapshot_instance(tr);
debdd57f
HT
6399 if (ret < 0)
6400 break;
debdd57f 6401 }
debdd57f
HT
6402 local_irq_disable();
6403 /* Now, we're going to swap */
f1affcaa 6404 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 6405 update_max_tr(tr, current, smp_processor_id());
f1affcaa 6406 else
ce9bae55 6407 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
6408 local_irq_enable();
6409 break;
6410 default:
45ad21ca 6411 if (tr->allocated_snapshot) {
f1affcaa
SRRH
6412 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6413 tracing_reset_online_cpus(&tr->max_buffer);
6414 else
6415 tracing_reset(&tr->max_buffer, iter->cpu_file);
6416 }
debdd57f
HT
6417 break;
6418 }
6419
6420 if (ret >= 0) {
6421 *ppos += cnt;
6422 ret = cnt;
6423 }
6424out:
6425 mutex_unlock(&trace_types_lock);
6426 return ret;
6427}
2b6080f2
SR
6428
6429static int tracing_snapshot_release(struct inode *inode, struct file *file)
6430{
6431 struct seq_file *m = file->private_data;
ff451961
SRRH
6432 int ret;
6433
6434 ret = tracing_release(inode, file);
2b6080f2
SR
6435
6436 if (file->f_mode & FMODE_READ)
ff451961 6437 return ret;
2b6080f2
SR
6438
6439 /* If write only, the seq_file is just a stub */
6440 if (m)
6441 kfree(m->private);
6442 kfree(m);
6443
6444 return 0;
6445}
6446
6de58e62
SRRH
6447static int tracing_buffers_open(struct inode *inode, struct file *filp);
6448static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6449 size_t count, loff_t *ppos);
6450static int tracing_buffers_release(struct inode *inode, struct file *file);
6451static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6452 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6453
6454static int snapshot_raw_open(struct inode *inode, struct file *filp)
6455{
6456 struct ftrace_buffer_info *info;
6457 int ret;
6458
6459 ret = tracing_buffers_open(inode, filp);
6460 if (ret < 0)
6461 return ret;
6462
6463 info = filp->private_data;
6464
6465 if (info->iter.trace->use_max_tr) {
6466 tracing_buffers_release(inode, filp);
6467 return -EBUSY;
6468 }
6469
6470 info->iter.snapshot = true;
6471 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6472
6473 return ret;
6474}
6475
debdd57f
HT
6476#endif /* CONFIG_TRACER_SNAPSHOT */
6477
6478
6508fa76
SF
6479static const struct file_operations tracing_thresh_fops = {
6480 .open = tracing_open_generic,
6481 .read = tracing_thresh_read,
6482 .write = tracing_thresh_write,
6483 .llseek = generic_file_llseek,
6484};
6485
f971cc9a 6486#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5e2336a0 6487static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
6488 .open = tracing_open_generic,
6489 .read = tracing_max_lat_read,
6490 .write = tracing_max_lat_write,
b444786f 6491 .llseek = generic_file_llseek,
bc0c38d1 6492};
e428abbb 6493#endif
bc0c38d1 6494
5e2336a0 6495static const struct file_operations set_tracer_fops = {
4bf39a94
IM
6496 .open = tracing_open_generic,
6497 .read = tracing_set_trace_read,
6498 .write = tracing_set_trace_write,
b444786f 6499 .llseek = generic_file_llseek,
bc0c38d1
SR
6500};
6501
5e2336a0 6502static const struct file_operations tracing_pipe_fops = {
4bf39a94 6503 .open = tracing_open_pipe,
2a2cc8f7 6504 .poll = tracing_poll_pipe,
4bf39a94 6505 .read = tracing_read_pipe,
3c56819b 6506 .splice_read = tracing_splice_read_pipe,
4bf39a94 6507 .release = tracing_release_pipe,
b444786f 6508 .llseek = no_llseek,
b3806b43
SR
6509};
6510
5e2336a0 6511static const struct file_operations tracing_entries_fops = {
0bc392ee 6512 .open = tracing_open_generic_tr,
a98a3c3f
SR
6513 .read = tracing_entries_read,
6514 .write = tracing_entries_write,
b444786f 6515 .llseek = generic_file_llseek,
0bc392ee 6516 .release = tracing_release_generic_tr,
a98a3c3f
SR
6517};
6518
f81ab074 6519static const struct file_operations tracing_total_entries_fops = {
7b85af63 6520 .open = tracing_open_generic_tr,
f81ab074
VN
6521 .read = tracing_total_entries_read,
6522 .llseek = generic_file_llseek,
7b85af63 6523 .release = tracing_release_generic_tr,
f81ab074
VN
6524};
6525
4f271a2a 6526static const struct file_operations tracing_free_buffer_fops = {
7b85af63 6527 .open = tracing_open_generic_tr,
4f271a2a
VN
6528 .write = tracing_free_buffer_write,
6529 .release = tracing_free_buffer_release,
6530};
6531
5e2336a0 6532static const struct file_operations tracing_mark_fops = {
7b85af63 6533 .open = tracing_open_generic_tr,
5bf9a1ee 6534 .write = tracing_mark_write,
b444786f 6535 .llseek = generic_file_llseek,
7b85af63 6536 .release = tracing_release_generic_tr,
5bf9a1ee
PP
6537};
6538
fa32e855
SR
6539static const struct file_operations tracing_mark_raw_fops = {
6540 .open = tracing_open_generic_tr,
6541 .write = tracing_mark_raw_write,
6542 .llseek = generic_file_llseek,
6543 .release = tracing_release_generic_tr,
6544};
6545
5079f326 6546static const struct file_operations trace_clock_fops = {
13f16d20
LZ
6547 .open = tracing_clock_open,
6548 .read = seq_read,
6549 .llseek = seq_lseek,
7b85af63 6550 .release = tracing_single_release_tr,
5079f326
Z
6551 .write = tracing_clock_write,
6552};
6553
debdd57f
HT
6554#ifdef CONFIG_TRACER_SNAPSHOT
6555static const struct file_operations snapshot_fops = {
6556 .open = tracing_snapshot_open,
6557 .read = seq_read,
6558 .write = tracing_snapshot_write,
098c879e 6559 .llseek = tracing_lseek,
2b6080f2 6560 .release = tracing_snapshot_release,
debdd57f 6561};
debdd57f 6562
6de58e62
SRRH
6563static const struct file_operations snapshot_raw_fops = {
6564 .open = snapshot_raw_open,
6565 .read = tracing_buffers_read,
6566 .release = tracing_buffers_release,
6567 .splice_read = tracing_buffers_splice_read,
6568 .llseek = no_llseek,
2cadf913
SR
6569};
6570
6de58e62
SRRH
6571#endif /* CONFIG_TRACER_SNAPSHOT */
6572
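/*
 * Open handler for the per-cpu trace_pipe_raw file: set up an
 * ftrace_buffer_info iterator that reads raw ring-buffer pages.
 */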
2cadf913
SR
6573static int tracing_buffers_open(struct inode *inode, struct file *filp)
6574{
46ef2be0 6575 struct trace_array *tr = inode->i_private;
2cadf913 6576 struct ftrace_buffer_info *info;
7b85af63 6577 int ret;
2cadf913
SR
6578
6579 if (tracing_disabled)
6580 return -ENODEV;
6581
7b85af63
SRRH
6582 if (trace_array_get(tr) < 0)
6583 return -ENODEV;
6584
2cadf913 6585 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
6586 if (!info) {
6587 trace_array_put(tr);
2cadf913 6588 return -ENOMEM;
7b85af63 6589 }
2cadf913 6590
a695cb58
SRRH
6591 mutex_lock(&trace_types_lock);
6592
cc60cdc9 6593 info->iter.tr = tr;
46ef2be0 6594 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 6595 info->iter.trace = tr->current_trace;
12883efb 6596 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 6597 info->spare = NULL;
2cadf913 6598 /* Force reading ring buffer for first read */
cc60cdc9 6599 info->read = (unsigned int)-1;
2cadf913
SR
6600
6601 filp->private_data = info;
6602
cf6ab6d9
SRRH
6603 tr->current_trace->ref++;
6604
a695cb58
SRRH
6605 mutex_unlock(&trace_types_lock);
6606
7b85af63
SRRH
6607 ret = nonseekable_open(inode, filp);
6608 if (ret < 0)
6609 trace_array_put(tr);
6610
6611 return ret;
2cadf913
SR
6612}
6613
cc60cdc9
SR
6614static unsigned int
6615tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6616{
6617 struct ftrace_buffer_info *info = filp->private_data;
6618 struct trace_iterator *iter = &info->iter;
6619
6620 return trace_poll(iter, filp, poll_table);
6621}
6622
2cadf913
SR
6623static ssize_t
6624tracing_buffers_read(struct file *filp, char __user *ubuf,
6625 size_t count, loff_t *ppos)
6626{
6627 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 6628 struct trace_iterator *iter = &info->iter;
a7e52ad7 6629 ssize_t ret = 0;
6de58e62 6630 ssize_t size;
2cadf913 6631
2dc5d12b
SR
6632 if (!count)
6633 return 0;
6634
6de58e62 6635#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
6636 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6637 return -EBUSY;
6de58e62
SRRH
6638#endif
6639
73a757e6 6640 if (!info->spare) {
12883efb
SRRH
6641 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6642 iter->cpu_file);
a7e52ad7
SRV
6643 if (IS_ERR(info->spare)) {
6644 ret = PTR_ERR(info->spare);
6645 info->spare = NULL;
6646 } else {
6647 info->spare_cpu = iter->cpu_file;
6648 }
73a757e6 6649 }
ddd538f3 6650 if (!info->spare)
a7e52ad7 6651 return ret;
ddd538f3 6652
2cadf913
SR
6653 /* Do we have previous read data to read? */
6654 if (info->read < PAGE_SIZE)
6655 goto read;
6656
b627344f 6657 again:
cc60cdc9 6658 trace_access_lock(iter->cpu_file);
12883efb 6659 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
6660 &info->spare,
6661 count,
cc60cdc9
SR
6662 iter->cpu_file, 0);
6663 trace_access_unlock(iter->cpu_file);
2cadf913 6664
b627344f
SR
6665 if (ret < 0) {
6666 if (trace_empty(iter)) {
d716ff71
SRRH
6667 if ((filp->f_flags & O_NONBLOCK))
6668 return -EAGAIN;
6669
e30f53aa 6670 ret = wait_on_pipe(iter, false);
d716ff71
SRRH
6671 if (ret)
6672 return ret;
6673
b627344f
SR
6674 goto again;
6675 }
d716ff71 6676 return 0;
b627344f 6677 }
436fc280 6678
436fc280 6679 info->read = 0;
b627344f 6680 read:
2cadf913
SR
6681 size = PAGE_SIZE - info->read;
6682 if (size > count)
6683 size = count;
6684
6685 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
6686 if (ret == size)
6687 return -EFAULT;
6688
2dc5d12b
SR
6689 size -= ret;
6690
2cadf913
SR
6691 *ppos += size;
6692 info->read += size;
6693
6694 return size;
6695}
6696
6697static int tracing_buffers_release(struct inode *inode, struct file *file)
6698{
6699 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 6700 struct trace_iterator *iter = &info->iter;
2cadf913 6701
a695cb58
SRRH
6702 mutex_lock(&trace_types_lock);
6703
cf6ab6d9
SRRH
6704 iter->tr->current_trace->ref--;
6705
ff451961 6706 __trace_array_put(iter->tr);
2cadf913 6707
ddd538f3 6708 if (info->spare)
73a757e6
SRV
6709 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6710 info->spare_cpu, info->spare);
2cadf913
SR
6711 kfree(info);
6712
a695cb58
SRRH
6713 mutex_unlock(&trace_types_lock);
6714
2cadf913
SR
6715 return 0;
6716}
6717
6718struct buffer_ref {
6719 struct ring_buffer *buffer;
6720 void *page;
73a757e6 6721 int cpu;
2cadf913
SR
6722 int ref;
6723};
6724
6725static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6726 struct pipe_buffer *buf)
6727{
6728 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6729
6730 if (--ref->ref)
6731 return;
6732
73a757e6 6733 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2cadf913
SR
6734 kfree(ref);
6735 buf->private = 0;
6736}
6737
2cadf913
SR
6738static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6739 struct pipe_buffer *buf)
6740{
6741 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6742
6743 ref->ref++;
6744}
6745
6746/* Pipe buffer operations for a buffer. */
28dfef8f 6747static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 6748 .can_merge = 0,
2cadf913
SR
6749 .confirm = generic_pipe_buf_confirm,
6750 .release = buffer_pipe_buf_release,
d55cb6cf 6751 .steal = generic_pipe_buf_steal,
2cadf913
SR
6752 .get = buffer_pipe_buf_get,
6753};
6754
6755/*
6756 * Callback from splice_to_pipe(), used to release some pages
6757 * at the end of the spd in case we errored out while filling the pipe.
6758 */
6759static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6760{
6761 struct buffer_ref *ref =
6762 (struct buffer_ref *)spd->partial[i].private;
6763
6764 if (--ref->ref)
6765 return;
6766
73a757e6 6767 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2cadf913
SR
6768 kfree(ref);
6769 spd->partial[i].private = 0;
6770}
6771
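/*
 * Splice handler for trace_pipe_raw: hand whole ring-buffer pages to the
 * pipe without copying. Offsets and lengths must be page aligned, and we
 * wait for data unless the caller asked for non-blocking I/O.
 */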
6772static ssize_t
6773tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6774 struct pipe_inode_info *pipe, size_t len,
6775 unsigned int flags)
6776{
6777 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 6778 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
6779 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6780 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 6781 struct splice_pipe_desc spd = {
35f3d14d
JA
6782 .pages = pages_def,
6783 .partial = partial_def,
047fe360 6784 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
6785 .ops = &buffer_pipe_buf_ops,
6786 .spd_release = buffer_spd_release,
6787 };
6788 struct buffer_ref *ref;
6b7e633f 6789 int entries, i;
07906da7 6790 ssize_t ret = 0;
2cadf913 6791
6de58e62 6792#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
6793 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6794 return -EBUSY;
6de58e62
SRRH
6795#endif
6796
d716ff71
SRRH
6797 if (*ppos & (PAGE_SIZE - 1))
6798 return -EINVAL;
93cfb3c9
LJ
6799
6800 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
6801 if (len < PAGE_SIZE)
6802 return -EINVAL;
93cfb3c9
LJ
6803 len &= PAGE_MASK;
6804 }
6805
1ae2293d
AV
6806 if (splice_grow_spd(pipe, &spd))
6807 return -ENOMEM;
6808
cc60cdc9
SR
6809 again:
6810 trace_access_lock(iter->cpu_file);
12883efb 6811 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 6812
a786c06d 6813 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
6814 struct page *page;
6815 int r;
6816
6817 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
6818 if (!ref) {
6819 ret = -ENOMEM;
2cadf913 6820 break;
07906da7 6821 }
2cadf913 6822
7267fa68 6823 ref->ref = 1;
12883efb 6824 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 6825 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
a7e52ad7
SRV
6826 if (IS_ERR(ref->page)) {
6827 ret = PTR_ERR(ref->page);
6828 ref->page = NULL;
2cadf913
SR
6829 kfree(ref);
6830 break;
6831 }
73a757e6 6832 ref->cpu = iter->cpu_file;
2cadf913
SR
6833
6834 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 6835 len, iter->cpu_file, 1);
2cadf913 6836 if (r < 0) {
73a757e6
SRV
6837 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6838 ref->page);
2cadf913
SR
6839 kfree(ref);
6840 break;
6841 }
6842
2cadf913
SR
6843 page = virt_to_page(ref->page);
6844
6845 spd.pages[i] = page;
6846 spd.partial[i].len = PAGE_SIZE;
6847 spd.partial[i].offset = 0;
6848 spd.partial[i].private = (unsigned long)ref;
6849 spd.nr_pages++;
93cfb3c9 6850 *ppos += PAGE_SIZE;
93459c6c 6851
12883efb 6852 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
6853 }
6854
cc60cdc9 6855 trace_access_unlock(iter->cpu_file);
2cadf913
SR
6856 spd.nr_pages = i;
6857
6858 /* did we read anything? */
6859 if (!spd.nr_pages) {
07906da7 6860 if (ret)
1ae2293d 6861 goto out;
d716ff71 6862
1ae2293d 6863 ret = -EAGAIN;
d716ff71 6864 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
1ae2293d 6865 goto out;
07906da7 6866
e30f53aa 6867 ret = wait_on_pipe(iter, true);
8b8b3683 6868 if (ret)
1ae2293d 6869 goto out;
e30f53aa 6870
cc60cdc9 6871 goto again;
2cadf913
SR
6872 }
6873
6874 ret = splice_to_pipe(pipe, &spd);
1ae2293d 6875out:
047fe360 6876 splice_shrink_spd(&spd);
6de58e62 6877
2cadf913
SR
6878 return ret;
6879}
6880
6881static const struct file_operations tracing_buffers_fops = {
6882 .open = tracing_buffers_open,
6883 .read = tracing_buffers_read,
cc60cdc9 6884 .poll = tracing_buffers_poll,
2cadf913
SR
6885 .release = tracing_buffers_release,
6886 .splice_read = tracing_buffers_splice_read,
6887 .llseek = no_llseek,
6888};
6889
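/*
 * Read handler for the per-cpu stats file: report entry, overrun,
 * commit-overrun, byte, dropped-event and read-event counts along with
 * the oldest and current timestamps.
 */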
c8d77183
SR
6890static ssize_t
6891tracing_stats_read(struct file *filp, char __user *ubuf,
6892 size_t count, loff_t *ppos)
6893{
4d3435b8
ON
6894 struct inode *inode = file_inode(filp);
6895 struct trace_array *tr = inode->i_private;
12883efb 6896 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 6897 int cpu = tracing_get_cpu(inode);
c8d77183
SR
6898 struct trace_seq *s;
6899 unsigned long cnt;
c64e148a
VN
6900 unsigned long long t;
6901 unsigned long usec_rem;
c8d77183 6902
e4f2d10f 6903 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 6904 if (!s)
a646365c 6905 return -ENOMEM;
c8d77183
SR
6906
6907 trace_seq_init(s);
6908
12883efb 6909 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6910 trace_seq_printf(s, "entries: %ld\n", cnt);
6911
12883efb 6912 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6913 trace_seq_printf(s, "overrun: %ld\n", cnt);
6914
12883efb 6915 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6916 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6917
12883efb 6918 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
6919 trace_seq_printf(s, "bytes: %ld\n", cnt);
6920
58e8eedf 6921 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 6922 /* local or global for trace_clock */
12883efb 6923 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
6924 usec_rem = do_div(t, USEC_PER_SEC);
6925 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6926 t, usec_rem);
6927
12883efb 6928 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
6929 usec_rem = do_div(t, USEC_PER_SEC);
6930 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6931 } else {
6932 /* counter or tsc mode for trace_clock */
6933 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 6934 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 6935
11043d8b 6936 trace_seq_printf(s, "now ts: %llu\n",
12883efb 6937 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 6938 }
c64e148a 6939
12883efb 6940 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
6941 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6942
12883efb 6943 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
6944 trace_seq_printf(s, "read events: %ld\n", cnt);
6945
5ac48378
SRRH
6946 count = simple_read_from_buffer(ubuf, count, ppos,
6947 s->buffer, trace_seq_used(s));
c8d77183
SR
6948
6949 kfree(s);
6950
6951 return count;
6952}
6953
6954static const struct file_operations tracing_stats_fops = {
4d3435b8 6955 .open = tracing_open_generic_tr,
c8d77183 6956 .read = tracing_stats_read,
b444786f 6957 .llseek = generic_file_llseek,
4d3435b8 6958 .release = tracing_release_generic_tr,
c8d77183
SR
6959};
6960
bc0c38d1
SR
6961#ifdef CONFIG_DYNAMIC_FTRACE
6962
6963static ssize_t
b807c3d0 6964tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
6965 size_t cnt, loff_t *ppos)
6966{
6967 unsigned long *p = filp->private_data;
6a9c981b 6968 char buf[64]; /* Not too big for a shallow stack */
bc0c38d1
SR
6969 int r;
6970
6a9c981b 6971 r = scnprintf(buf, 63, "%ld", *p);
b807c3d0
SR
6972 buf[r++] = '\n';
6973
6a9c981b 6974 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
6975}
6976
5e2336a0 6977static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 6978 .open = tracing_open_generic,
b807c3d0 6979 .read = tracing_read_dyn_info,
b444786f 6980 .llseek = generic_file_llseek,
bc0c38d1 6981};
77fd5c15 6982#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 6983
77fd5c15
SRRH
6984#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6985static void
bca6c8d0 6986ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
b5f081b5 6987 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 6988 void *data)
77fd5c15 6989{
cab50379 6990 tracing_snapshot_instance(tr);
77fd5c15 6991}
bc0c38d1 6992
77fd5c15 6993static void
bca6c8d0 6994ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
b5f081b5 6995 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 6996 void *data)
bc0c38d1 6997{
6e444319 6998 struct ftrace_func_mapper *mapper = data;
1a93f8bd 6999 long *count = NULL;
77fd5c15 7000
1a93f8bd
SRV
7001 if (mapper)
7002 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7003
7004 if (count) {
7005
7006 if (*count <= 0)
7007 return;
bc0c38d1 7008
77fd5c15 7009 (*count)--;
1a93f8bd 7010 }
77fd5c15 7011
cab50379 7012 tracing_snapshot_instance(tr);
77fd5c15
SRRH
7013}
7014
7015static int
7016ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7017 struct ftrace_probe_ops *ops, void *data)
7018{
6e444319 7019 struct ftrace_func_mapper *mapper = data;
1a93f8bd 7020 long *count = NULL;
77fd5c15
SRRH
7021
7022 seq_printf(m, "%ps:", (void *)ip);
7023
fa6f0cc7 7024 seq_puts(m, "snapshot");
77fd5c15 7025
1a93f8bd
SRV
7026 if (mapper)
7027 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7028
7029 if (count)
7030 seq_printf(m, ":count=%ld\n", *count);
77fd5c15 7031 else
1a93f8bd 7032 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
7033
7034 return 0;
7035}
7036
1a93f8bd 7037static int
b5f081b5 7038ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 7039 unsigned long ip, void *init_data, void **data)
1a93f8bd 7040{
6e444319
SRV
7041 struct ftrace_func_mapper *mapper = *data;
7042
7043 if (!mapper) {
7044 mapper = allocate_ftrace_func_mapper();
7045 if (!mapper)
7046 return -ENOMEM;
7047 *data = mapper;
7048 }
1a93f8bd 7049
6e444319 7050 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
1a93f8bd
SRV
7051}
7052
7053static void
b5f081b5 7054ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 7055 unsigned long ip, void *data)
1a93f8bd 7056{
6e444319
SRV
7057 struct ftrace_func_mapper *mapper = data;
7058
7059 if (!ip) {
7060 if (!mapper)
7061 return;
7062 free_ftrace_func_mapper(mapper, NULL);
7063 return;
7064 }
1a93f8bd
SRV
7065
7066 ftrace_func_mapper_remove_ip(mapper, ip);
7067}
7068
77fd5c15
SRRH
7069static struct ftrace_probe_ops snapshot_probe_ops = {
7070 .func = ftrace_snapshot,
7071 .print = ftrace_snapshot_print,
7072};
7073
7074static struct ftrace_probe_ops snapshot_count_probe_ops = {
7075 .func = ftrace_count_snapshot,
7076 .print = ftrace_snapshot_print,
1a93f8bd
SRV
7077 .init = ftrace_snapshot_init,
7078 .free = ftrace_snapshot_free,
77fd5c15
SRRH
7079};
7080
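/*
 * Implements the "snapshot" command of set_ftrace_filter:
 * "<func>:snapshot[:count]" registers a probe that takes a tracing
 * snapshot when <func> is hit, optionally limited to <count> hits,
 * and a leading '!' unregisters it.
 */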
7081static int
04ec7bb6 7082ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
77fd5c15
SRRH
7083 char *glob, char *cmd, char *param, int enable)
7084{
7085 struct ftrace_probe_ops *ops;
7086 void *count = (void *)-1;
7087 char *number;
7088 int ret;
7089
0f179765
SRV
7090 if (!tr)
7091 return -ENODEV;
7092
77fd5c15
SRRH
7093 /* hash funcs only work with set_ftrace_filter */
7094 if (!enable)
7095 return -EINVAL;
7096
7097 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7098
d3d532d7 7099 if (glob[0] == '!')
7b60f3d8 7100 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
77fd5c15
SRRH
7101
7102 if (!param)
7103 goto out_reg;
7104
7105 number = strsep(&param, ":");
7106
7107 if (!strlen(number))
7108 goto out_reg;
7109
7110 /*
7111 * We use the callback data field (which is a pointer)
7112 * as our counter.
7113 */
7114 ret = kstrtoul(number, 0, (unsigned long *)&count);
7115 if (ret)
7116 return ret;
7117
7118 out_reg:
9ccd9a81 7119 ret = tracing_alloc_snapshot_instance(tr);
df62db5b
SRV
7120 if (ret < 0)
7121 goto out;
77fd5c15 7122
4c174688 7123 ret = register_ftrace_function_probe(glob, tr, ops, count);
77fd5c15 7124
df62db5b 7125 out:
77fd5c15
SRRH
7126 return ret < 0 ? ret : 0;
7127}
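/*
 * Usage sketch (assumes tracefs mounted at /sys/kernel/tracing): the
 * "snapshot" command registered below is attached to functions through
 * the set_ftrace_filter file, with an optional trigger count, and is
 * removed again with a leading '!', matching the glob handling above:
 *
 *	echo 'schedule:snapshot'      > set_ftrace_filter
 *	echo 'do_sys_open:snapshot:5' > set_ftrace_filter
 *	echo '!do_sys_open:snapshot:5' > set_ftrace_filter
 */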
7128
7129static struct ftrace_func_command ftrace_snapshot_cmd = {
7130 .name = "snapshot",
7131 .func = ftrace_trace_snapshot_callback,
7132};
7133
38de93ab 7134static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
7135{
7136 return register_ftrace_command(&ftrace_snapshot_cmd);
7137}
7138#else
38de93ab 7139static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 7140#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 7141
7eeafbca 7142static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 7143{
8434dc93
SRRH
7144 if (WARN_ON(!tr->dir))
7145 return ERR_PTR(-ENODEV);
7146
7147 /* Top directory uses NULL as the parent */
7148 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7149 return NULL;
7150
7151 /* All sub buffers have a descriptor */
2b6080f2 7152 return tr->dir;
bc0c38d1
SR
7153}
7154
2b6080f2 7155static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 7156{
b04cc6b1
FW
7157 struct dentry *d_tracer;
7158
2b6080f2
SR
7159 if (tr->percpu_dir)
7160 return tr->percpu_dir;
b04cc6b1 7161
7eeafbca 7162 d_tracer = tracing_get_dentry(tr);
14a5ae40 7163 if (IS_ERR(d_tracer))
b04cc6b1
FW
7164 return NULL;
7165
8434dc93 7166 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 7167
2b6080f2 7168 WARN_ONCE(!tr->percpu_dir,
8434dc93 7169 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 7170
2b6080f2 7171 return tr->percpu_dir;
b04cc6b1
FW
7172}
7173
649e9c70
ON
7174static struct dentry *
7175trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7176 void *data, long cpu, const struct file_operations *fops)
7177{
7178 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7179
7180 if (ret) /* See tracing_get_cpu() */
7682c918 7181 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
7182 return ret;
7183}
7184
2b6080f2 7185static void
8434dc93 7186tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 7187{
2b6080f2 7188 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 7189 struct dentry *d_cpu;
dd49a38c 7190 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 7191
0a3d7ce7
NK
7192 if (!d_percpu)
7193 return;
7194
dd49a38c 7195 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 7196 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 7197 if (!d_cpu) {
a395d6a7 7198 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
7199 return;
7200 }
b04cc6b1 7201
8656e7a2 7202 /* per cpu trace_pipe */
649e9c70 7203 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 7204 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
7205
7206 /* per cpu trace */
649e9c70 7207 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 7208 tr, cpu, &tracing_fops);
7f96f93f 7209
649e9c70 7210 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 7211 tr, cpu, &tracing_buffers_fops);
7f96f93f 7212
649e9c70 7213 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 7214 tr, cpu, &tracing_stats_fops);
438ced17 7215
649e9c70 7216 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 7217 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
7218
7219#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 7220 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 7221 tr, cpu, &snapshot_fops);
6de58e62 7222
649e9c70 7223 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 7224 tr, cpu, &snapshot_raw_fops);
f1affcaa 7225#endif
b04cc6b1
FW
7226}
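/*
 * Usage sketch (assumes tracefs mounted at /sys/kernel/tracing): each CPU
 * gets its own directory containing the files created above, for example:
 *
 *	cat /sys/kernel/tracing/per_cpu/cpu0/trace
 *	cat /sys/kernel/tracing/per_cpu/cpu0/stats
 */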
7227
60a11774
SR
7228#ifdef CONFIG_FTRACE_SELFTEST
7229/* Let selftest have access to static functions in this file */
7230#include "trace_selftest.c"
7231#endif
7232
577b785f
SR
7233static ssize_t
7234trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7235 loff_t *ppos)
7236{
7237 struct trace_option_dentry *topt = filp->private_data;
7238 char *buf;
7239
7240 if (topt->flags->val & topt->opt->bit)
7241 buf = "1\n";
7242 else
7243 buf = "0\n";
7244
7245 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7246}
7247
7248static ssize_t
7249trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7250 loff_t *ppos)
7251{
7252 struct trace_option_dentry *topt = filp->private_data;
7253 unsigned long val;
577b785f
SR
7254 int ret;
7255
22fe9b54
PH
7256 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7257 if (ret)
577b785f
SR
7258 return ret;
7259
8d18eaaf
LZ
7260 if (val != 0 && val != 1)
7261 return -EINVAL;
577b785f 7262
8d18eaaf 7263 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 7264 mutex_lock(&trace_types_lock);
8c1a49ae 7265 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 7266 topt->opt, !val);
577b785f
SR
7267 mutex_unlock(&trace_types_lock);
7268 if (ret)
7269 return ret;
577b785f
SR
7270 }
7271
7272 *ppos += cnt;
7273
7274 return cnt;
7275}
7276
7277
7278static const struct file_operations trace_options_fops = {
7279 .open = tracing_open_generic,
7280 .read = trace_options_read,
7281 .write = trace_options_write,
b444786f 7282 .llseek = generic_file_llseek,
577b785f
SR
7283};
7284
9a38a885
SRRH
7285/*
7286 * In order to pass in both the trace_array descriptor as well as the index
7287 * to the flag that the trace option file represents, the trace_array
7288 * has a character array of trace_flags_index[], which holds the index
7289 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7290 * The address of this character array is passed to the flag option file
7291 * read/write callbacks.
7292 *
7293 * In order to extract both the index and the trace_array descriptor,
7294 * get_tr_index() uses the following algorithm.
7295 *
7296 * idx = *ptr;
7297 *
 7298 * The pointer passed in is the address of one index entry, and each
 7299 * entry holds its own offset (remember index[1] == 1), so *ptr is the index.
 7300 *
 7301 * Then, to get the trace_array descriptor, we subtract that index
 7302 * from the pointer, which puts us at the start of the index array itself.
7303 *
7304 * ptr - idx == &index[0]
7305 *
7306 * Then a simple container_of() from that pointer gets us to the
7307 * trace_array descriptor.
7308 */
7309static void get_tr_index(void *data, struct trace_array **ptr,
7310 unsigned int *pindex)
7311{
7312 *pindex = *(unsigned char *)data;
7313
7314 *ptr = container_of(data - *pindex, struct trace_array,
7315 trace_flags_index);
7316}
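/*
 * A minimal worked example of the scheme above, assuming a trace_array
 * whose trace_flags_index[] was filled by init_trace_flags_index()
 * (so index[i] == i):
 *
 *	void *data = &tr->trace_flags_index[1];
 *	idx  = *(unsigned char *)data;		// == 1
 *	base = data - idx;			// == &tr->trace_flags_index[0]
 *	tr   = container_of(base, struct trace_array, trace_flags_index);
 */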
7317
a8259075
SR
7318static ssize_t
7319trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7320 loff_t *ppos)
7321{
9a38a885
SRRH
7322 void *tr_index = filp->private_data;
7323 struct trace_array *tr;
7324 unsigned int index;
a8259075
SR
7325 char *buf;
7326
9a38a885
SRRH
7327 get_tr_index(tr_index, &tr, &index);
7328
7329 if (tr->trace_flags & (1 << index))
a8259075
SR
7330 buf = "1\n";
7331 else
7332 buf = "0\n";
7333
7334 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7335}
7336
7337static ssize_t
7338trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7339 loff_t *ppos)
7340{
9a38a885
SRRH
7341 void *tr_index = filp->private_data;
7342 struct trace_array *tr;
7343 unsigned int index;
a8259075
SR
7344 unsigned long val;
7345 int ret;
7346
9a38a885
SRRH
7347 get_tr_index(tr_index, &tr, &index);
7348
22fe9b54
PH
7349 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7350 if (ret)
a8259075
SR
7351 return ret;
7352
f2d84b65 7353 if (val != 0 && val != 1)
a8259075 7354 return -EINVAL;
69d34da2
SRRH
7355
7356 mutex_lock(&trace_types_lock);
2b6080f2 7357 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 7358 mutex_unlock(&trace_types_lock);
a8259075 7359
613f04a0
SRRH
7360 if (ret < 0)
7361 return ret;
7362
a8259075
SR
7363 *ppos += cnt;
7364
7365 return cnt;
7366}
7367
a8259075
SR
7368static const struct file_operations trace_options_core_fops = {
7369 .open = tracing_open_generic,
7370 .read = trace_options_core_read,
7371 .write = trace_options_core_write,
b444786f 7372 .llseek = generic_file_llseek,
a8259075
SR
7373};
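/*
 * Usage sketch (assumes tracefs mounted at /sys/kernel/tracing): the core
 * trace flags backed by these fops appear as boolean files under the
 * options/ directory, for example:
 *
 *	cat  /sys/kernel/tracing/options/print-parent
 *	echo 0 > /sys/kernel/tracing/options/print-parent
 */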
7374
5452af66 7375struct dentry *trace_create_file(const char *name,
f4ae40a6 7376 umode_t mode,
5452af66
FW
7377 struct dentry *parent,
7378 void *data,
7379 const struct file_operations *fops)
7380{
7381 struct dentry *ret;
7382
8434dc93 7383 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 7384 if (!ret)
a395d6a7 7385 pr_warn("Could not create tracefs '%s' entry\n", name);
5452af66
FW
7386
7387 return ret;
7388}
7389
7390
2b6080f2 7391static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
7392{
7393 struct dentry *d_tracer;
a8259075 7394
2b6080f2
SR
7395 if (tr->options)
7396 return tr->options;
a8259075 7397
7eeafbca 7398 d_tracer = tracing_get_dentry(tr);
14a5ae40 7399 if (IS_ERR(d_tracer))
a8259075
SR
7400 return NULL;
7401
8434dc93 7402 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 7403 if (!tr->options) {
a395d6a7 7404 pr_warn("Could not create tracefs directory 'options'\n");
a8259075
SR
7405 return NULL;
7406 }
7407
2b6080f2 7408 return tr->options;
a8259075
SR
7409}
7410
577b785f 7411static void
2b6080f2
SR
7412create_trace_option_file(struct trace_array *tr,
7413 struct trace_option_dentry *topt,
577b785f
SR
7414 struct tracer_flags *flags,
7415 struct tracer_opt *opt)
7416{
7417 struct dentry *t_options;
577b785f 7418
2b6080f2 7419 t_options = trace_options_init_dentry(tr);
577b785f
SR
7420 if (!t_options)
7421 return;
7422
7423 topt->flags = flags;
7424 topt->opt = opt;
2b6080f2 7425 topt->tr = tr;
577b785f 7426
5452af66 7427 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
7428 &trace_options_fops);
7429
577b785f
SR
7430}
7431
37aea98b 7432static void
2b6080f2 7433create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
7434{
7435 struct trace_option_dentry *topts;
37aea98b 7436 struct trace_options *tr_topts;
577b785f
SR
7437 struct tracer_flags *flags;
7438 struct tracer_opt *opts;
7439 int cnt;
37aea98b 7440 int i;
577b785f
SR
7441
7442 if (!tracer)
37aea98b 7443 return;
577b785f
SR
7444
7445 flags = tracer->flags;
7446
7447 if (!flags || !flags->opts)
37aea98b
SRRH
7448 return;
7449
7450 /*
7451 * If this is an instance, only create flags for tracers
7452 * the instance may have.
7453 */
7454 if (!trace_ok_for_array(tracer, tr))
7455 return;
7456
7457 for (i = 0; i < tr->nr_topts; i++) {
d39cdd20
CH
 7458 /* Make sure there are no duplicate flags. */
7459 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
37aea98b
SRRH
7460 return;
7461 }
577b785f
SR
7462
7463 opts = flags->opts;
7464
7465 for (cnt = 0; opts[cnt].name; cnt++)
7466 ;
7467
0cfe8245 7468 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f 7469 if (!topts)
37aea98b
SRRH
7470 return;
7471
7472 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7473 GFP_KERNEL);
7474 if (!tr_topts) {
7475 kfree(topts);
7476 return;
7477 }
7478
7479 tr->topts = tr_topts;
7480 tr->topts[tr->nr_topts].tracer = tracer;
7481 tr->topts[tr->nr_topts].topts = topts;
7482 tr->nr_topts++;
577b785f 7483
41d9c0be 7484 for (cnt = 0; opts[cnt].name; cnt++) {
2b6080f2 7485 create_trace_option_file(tr, &topts[cnt], flags,
577b785f 7486 &opts[cnt]);
41d9c0be
SRRH
7487 WARN_ONCE(topts[cnt].entry == NULL,
7488 "Failed to create trace option: %s",
7489 opts[cnt].name);
7490 }
577b785f
SR
7491}
7492
a8259075 7493static struct dentry *
2b6080f2
SR
7494create_trace_option_core_file(struct trace_array *tr,
7495 const char *option, long index)
a8259075
SR
7496{
7497 struct dentry *t_options;
a8259075 7498
2b6080f2 7499 t_options = trace_options_init_dentry(tr);
a8259075
SR
7500 if (!t_options)
7501 return NULL;
7502
9a38a885
SRRH
7503 return trace_create_file(option, 0644, t_options,
7504 (void *)&tr->trace_flags_index[index],
7505 &trace_options_core_fops);
a8259075
SR
7506}
7507
16270145 7508static void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
7509{
7510 struct dentry *t_options;
16270145 7511 bool top_level = tr == &global_trace;
a8259075
SR
7512 int i;
7513
2b6080f2 7514 t_options = trace_options_init_dentry(tr);
a8259075
SR
7515 if (!t_options)
7516 return;
7517
16270145
SRRH
7518 for (i = 0; trace_options[i]; i++) {
7519 if (top_level ||
7520 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7521 create_trace_option_core_file(tr, trace_options[i], i);
7522 }
a8259075
SR
7523}
7524
499e5470
SR
7525static ssize_t
7526rb_simple_read(struct file *filp, char __user *ubuf,
7527 size_t cnt, loff_t *ppos)
7528{
348f0fc2 7529 struct trace_array *tr = filp->private_data;
499e5470
SR
7530 char buf[64];
7531 int r;
7532
10246fa3 7533 r = tracer_tracing_is_on(tr);
499e5470
SR
7534 r = sprintf(buf, "%d\n", r);
7535
7536 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7537}
7538
7539static ssize_t
7540rb_simple_write(struct file *filp, const char __user *ubuf,
7541 size_t cnt, loff_t *ppos)
7542{
348f0fc2 7543 struct trace_array *tr = filp->private_data;
12883efb 7544 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
7545 unsigned long val;
7546 int ret;
7547
7548 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7549 if (ret)
7550 return ret;
7551
7552 if (buffer) {
2df8f8a6 7553 mutex_lock(&trace_types_lock);
77e73a4a
SRV
7554 if (!!val == tracer_tracing_is_on(tr)) {
7555 val = 0; /* do nothing */
7556 } else if (val) {
10246fa3 7557 tracer_tracing_on(tr);
2b6080f2
SR
7558 if (tr->current_trace->start)
7559 tr->current_trace->start(tr);
2df8f8a6 7560 } else {
10246fa3 7561 tracer_tracing_off(tr);
2b6080f2
SR
7562 if (tr->current_trace->stop)
7563 tr->current_trace->stop(tr);
2df8f8a6
SR
7564 }
7565 mutex_unlock(&trace_types_lock);
499e5470
SR
7566 }
7567
7568 (*ppos)++;
7569
7570 return cnt;
7571}
7572
7573static const struct file_operations rb_simple_fops = {
7b85af63 7574 .open = tracing_open_generic_tr,
499e5470
SR
7575 .read = rb_simple_read,
7576 .write = rb_simple_write,
7b85af63 7577 .release = tracing_release_generic_tr,
499e5470
SR
7578 .llseek = default_llseek,
7579};
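/*
 * Usage sketch (assumes tracefs mounted at /sys/kernel/tracing): these fops
 * back the tracing_on file, so a write toggles recording into the ring
 * buffer without changing the current tracer:
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on	   # stop recording
 *	echo 1 > /sys/kernel/tracing/tracing_on	   # resume recording
 */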
7580
277ba044
SR
7581struct dentry *trace_instance_dir;
7582
7583static void
8434dc93 7584init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 7585
55034cd6
SRRH
7586static int
7587allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
7588{
7589 enum ring_buffer_flags rb_flags;
737223fb 7590
983f938a 7591 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
737223fb 7592
dced341b
SRRH
7593 buf->tr = tr;
7594
55034cd6
SRRH
7595 buf->buffer = ring_buffer_alloc(size, rb_flags);
7596 if (!buf->buffer)
7597 return -ENOMEM;
737223fb 7598
55034cd6
SRRH
7599 buf->data = alloc_percpu(struct trace_array_cpu);
7600 if (!buf->data) {
7601 ring_buffer_free(buf->buffer);
4397f045 7602 buf->buffer = NULL;
55034cd6
SRRH
7603 return -ENOMEM;
7604 }
737223fb 7605
737223fb
SRRH
7606 /* Allocate the first page for all buffers */
7607 set_buffer_entries(&tr->trace_buffer,
7608 ring_buffer_size(tr->trace_buffer.buffer, 0));
7609
55034cd6
SRRH
7610 return 0;
7611}
737223fb 7612
55034cd6
SRRH
7613static int allocate_trace_buffers(struct trace_array *tr, int size)
7614{
7615 int ret;
737223fb 7616
55034cd6
SRRH
7617 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7618 if (ret)
7619 return ret;
737223fb 7620
55034cd6
SRRH
7621#ifdef CONFIG_TRACER_MAX_TRACE
7622 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7623 allocate_snapshot ? size : 1);
7624 if (WARN_ON(ret)) {
737223fb 7625 ring_buffer_free(tr->trace_buffer.buffer);
24f2aaf9 7626 tr->trace_buffer.buffer = NULL;
55034cd6 7627 free_percpu(tr->trace_buffer.data);
24f2aaf9 7628 tr->trace_buffer.data = NULL;
55034cd6
SRRH
7629 return -ENOMEM;
7630 }
7631 tr->allocated_snapshot = allocate_snapshot;
737223fb 7632
55034cd6
SRRH
7633 /*
7634 * Only the top level trace array gets its snapshot allocated
7635 * from the kernel command line.
7636 */
7637 allocate_snapshot = false;
737223fb 7638#endif
55034cd6 7639 return 0;
737223fb
SRRH
7640}
7641
f0b70cc4
SRRH
7642static void free_trace_buffer(struct trace_buffer *buf)
7643{
7644 if (buf->buffer) {
7645 ring_buffer_free(buf->buffer);
7646 buf->buffer = NULL;
7647 free_percpu(buf->data);
7648 buf->data = NULL;
7649 }
7650}
7651
23aaa3c1
SRRH
7652static void free_trace_buffers(struct trace_array *tr)
7653{
7654 if (!tr)
7655 return;
7656
f0b70cc4 7657 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
7658
7659#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 7660 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
7661#endif
7662}
7663
9a38a885
SRRH
7664static void init_trace_flags_index(struct trace_array *tr)
7665{
7666 int i;
7667
7668 /* Used by the trace options files */
7669 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7670 tr->trace_flags_index[i] = i;
7671}
7672
37aea98b
SRRH
7673static void __update_tracer_options(struct trace_array *tr)
7674{
7675 struct tracer *t;
7676
7677 for (t = trace_types; t; t = t->next)
7678 add_tracer_options(tr, t);
7679}
7680
7681static void update_tracer_options(struct trace_array *tr)
7682{
7683 mutex_lock(&trace_types_lock);
7684 __update_tracer_options(tr);
7685 mutex_unlock(&trace_types_lock);
7686}
7687
eae47358 7688static int instance_mkdir(const char *name)
737223fb 7689{
277ba044
SR
7690 struct trace_array *tr;
7691 int ret;
277ba044 7692
12ecef0c 7693 mutex_lock(&event_mutex);
277ba044
SR
7694 mutex_lock(&trace_types_lock);
7695
7696 ret = -EEXIST;
7697 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7698 if (tr->name && strcmp(tr->name, name) == 0)
7699 goto out_unlock;
7700 }
7701
7702 ret = -ENOMEM;
7703 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7704 if (!tr)
7705 goto out_unlock;
7706
7707 tr->name = kstrdup(name, GFP_KERNEL);
7708 if (!tr->name)
7709 goto out_free_tr;
7710
ccfe9e42
AL
7711 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7712 goto out_free_tr;
7713
20550622 7714 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
983f938a 7715
ccfe9e42
AL
7716 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7717
277ba044
SR
7718 raw_spin_lock_init(&tr->start_lock);
7719
0b9b12c1
SRRH
7720 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7721
277ba044
SR
7722 tr->current_trace = &nop_trace;
7723
7724 INIT_LIST_HEAD(&tr->systems);
7725 INIT_LIST_HEAD(&tr->events);
7726
737223fb 7727 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
7728 goto out_free_tr;
7729
8434dc93 7730 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
7731 if (!tr->dir)
7732 goto out_free_tr;
7733
7734 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 7735 if (ret) {
8434dc93 7736 tracefs_remove_recursive(tr->dir);
277ba044 7737 goto out_free_tr;
609e85a7 7738 }
277ba044 7739
04ec7bb6
SRV
7740 ftrace_init_trace_array(tr);
7741
8434dc93 7742 init_tracer_tracefs(tr, tr->dir);
9a38a885 7743 init_trace_flags_index(tr);
37aea98b 7744 __update_tracer_options(tr);
277ba044
SR
7745
7746 list_add(&tr->list, &ftrace_trace_arrays);
7747
7748 mutex_unlock(&trace_types_lock);
12ecef0c 7749 mutex_unlock(&event_mutex);
277ba044
SR
7750
7751 return 0;
7752
7753 out_free_tr:
23aaa3c1 7754 free_trace_buffers(tr);
ccfe9e42 7755 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
7756 kfree(tr->name);
7757 kfree(tr);
7758
7759 out_unlock:
7760 mutex_unlock(&trace_types_lock);
12ecef0c 7761 mutex_unlock(&event_mutex);
277ba044
SR
7762
7763 return ret;
7764
7765}
7766
eae47358 7767static int instance_rmdir(const char *name)
0c8916c3
SR
7768{
7769 struct trace_array *tr;
7770 int found = 0;
7771 int ret;
37aea98b 7772 int i;
0c8916c3 7773
12ecef0c 7774 mutex_lock(&event_mutex);
0c8916c3
SR
7775 mutex_lock(&trace_types_lock);
7776
7777 ret = -ENODEV;
7778 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7779 if (tr->name && strcmp(tr->name, name) == 0) {
7780 found = 1;
7781 break;
7782 }
7783 }
7784 if (!found)
7785 goto out_unlock;
7786
a695cb58 7787 ret = -EBUSY;
cf6ab6d9 7788 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
7789 goto out_unlock;
7790
0c8916c3
SR
7791 list_del(&tr->list);
7792
20550622
SRRH
7793 /* Disable all the flags that were enabled coming in */
7794 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7795 if ((1 << i) & ZEROED_TRACE_FLAGS)
7796 set_tracer_flag(tr, 1 << i, 0);
7797 }
7798
6b450d25 7799 tracing_set_nop(tr);
a0e6369e 7800 clear_ftrace_function_probes(tr);
0c8916c3 7801 event_trace_del_tracer(tr);
d879d0b8 7802 ftrace_clear_pids(tr);
591dffda 7803 ftrace_destroy_function_files(tr);
681a4a2f 7804 tracefs_remove_recursive(tr->dir);
a9fcaaac 7805 free_trace_buffers(tr);
0c8916c3 7806
37aea98b
SRRH
7807 for (i = 0; i < tr->nr_topts; i++) {
7808 kfree(tr->topts[i].topts);
7809 }
7810 kfree(tr->topts);
7811
db9108e0 7812 free_cpumask_var(tr->tracing_cpumask);
0c8916c3
SR
7813 kfree(tr->name);
7814 kfree(tr);
7815
7816 ret = 0;
7817
7818 out_unlock:
7819 mutex_unlock(&trace_types_lock);
12ecef0c 7820 mutex_unlock(&event_mutex);
0c8916c3
SR
7821
7822 return ret;
7823}
7824
277ba044
SR
7825static __init void create_trace_instances(struct dentry *d_tracer)
7826{
eae47358
SRRH
7827 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7828 instance_mkdir,
7829 instance_rmdir);
277ba044
SR
7830 if (WARN_ON(!trace_instance_dir))
7831 return;
277ba044
SR
7832}
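/*
 * Usage sketch (assumes tracefs mounted at /sys/kernel/tracing): the
 * instances directory registered above maps mkdir/rmdir onto
 * instance_mkdir()/instance_rmdir():
 *
 *	mkdir /sys/kernel/tracing/instances/foo	   # new trace_array "foo"
 *	rmdir /sys/kernel/tracing/instances/foo	   # tear it down again
 */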
7833
2b6080f2 7834static void
8434dc93 7835init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 7836{
121aaee7 7837 int cpu;
2b6080f2 7838
607e2ea1
SRRH
7839 trace_create_file("available_tracers", 0444, d_tracer,
7840 tr, &show_traces_fops);
7841
7842 trace_create_file("current_tracer", 0644, d_tracer,
7843 tr, &set_tracer_fops);
7844
ccfe9e42
AL
7845 trace_create_file("tracing_cpumask", 0644, d_tracer,
7846 tr, &tracing_cpumask_fops);
7847
2b6080f2
SR
7848 trace_create_file("trace_options", 0644, d_tracer,
7849 tr, &tracing_iter_fops);
7850
7851 trace_create_file("trace", 0644, d_tracer,
6484c71c 7852 tr, &tracing_fops);
2b6080f2
SR
7853
7854 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 7855 tr, &tracing_pipe_fops);
2b6080f2
SR
7856
7857 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 7858 tr, &tracing_entries_fops);
2b6080f2
SR
7859
7860 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7861 tr, &tracing_total_entries_fops);
7862
238ae93d 7863 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
7864 tr, &tracing_free_buffer_fops);
7865
7866 trace_create_file("trace_marker", 0220, d_tracer,
7867 tr, &tracing_mark_fops);
7868
fa32e855
SR
7869 trace_create_file("trace_marker_raw", 0220, d_tracer,
7870 tr, &tracing_mark_raw_fops);
7871
2b6080f2
SR
7872 trace_create_file("trace_clock", 0644, d_tracer, tr,
7873 &trace_clock_fops);
7874
7875 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 7876 tr, &rb_simple_fops);
ce9bae55 7877
16270145
SRRH
7878 create_trace_options_dir(tr);
7879
f971cc9a 7880#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6d9b3fa5
SRRH
7881 trace_create_file("tracing_max_latency", 0644, d_tracer,
7882 &tr->max_latency, &tracing_max_lat_fops);
7883#endif
7884
591dffda
SRRH
7885 if (ftrace_create_function_files(tr, d_tracer))
7886 WARN(1, "Could not allocate function filter files");
7887
ce9bae55
SRRH
7888#ifdef CONFIG_TRACER_SNAPSHOT
7889 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 7890 tr, &snapshot_fops);
ce9bae55 7891#endif
121aaee7
SRRH
7892
7893 for_each_tracing_cpu(cpu)
8434dc93 7894 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 7895
345ddcc8 7896 ftrace_init_tracefs(tr, d_tracer);
2b6080f2
SR
7897}
7898
93faccbb 7899static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
f76180bc
SRRH
7900{
7901 struct vfsmount *mnt;
7902 struct file_system_type *type;
7903
7904 /*
7905 * To maintain backward compatibility for tools that mount
7906 * debugfs to get to the tracing facility, tracefs is automatically
7907 * mounted to the debugfs/tracing directory.
7908 */
7909 type = get_fs_type("tracefs");
7910 if (!type)
7911 return NULL;
93faccbb 7912 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
f76180bc
SRRH
7913 put_filesystem(type);
7914 if (IS_ERR(mnt))
7915 return NULL;
7916 mntget(mnt);
7917
7918 return mnt;
7919}
7920
7eeafbca
SRRH
7921/**
7922 * tracing_init_dentry - initialize top level trace array
7923 *
7924 * This is called when creating files or directories in the tracing
 7925 * directory. It is called via fs_initcall() by the boot-up code
7926 * and expects to return the dentry of the top level tracing directory.
7927 */
7928struct dentry *tracing_init_dentry(void)
7929{
7930 struct trace_array *tr = &global_trace;
7931
f76180bc 7932 /* The top level trace array uses NULL as parent */
7eeafbca 7933 if (tr->dir)
f76180bc 7934 return NULL;
7eeafbca 7935
8b129199
JW
7936 if (WARN_ON(!tracefs_initialized()) ||
7937 (IS_ENABLED(CONFIG_DEBUG_FS) &&
7938 WARN_ON(!debugfs_initialized())))
7eeafbca
SRRH
7939 return ERR_PTR(-ENODEV);
7940
f76180bc
SRRH
7941 /*
7942 * As there may still be users that expect the tracing
7943 * files to exist in debugfs/tracing, we must automount
7944 * the tracefs file system there, so older tools still
 7945 * work with the newer kernel.
7946 */
7947 tr->dir = debugfs_create_automount("tracing", NULL,
7948 trace_automount, NULL);
7eeafbca
SRRH
7949 if (!tr->dir) {
7950 pr_warn_once("Could not create debugfs directory 'tracing'\n");
7951 return ERR_PTR(-ENOMEM);
7952 }
7953
8434dc93 7954 return NULL;
7eeafbca
SRRH
7955}
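/*
 * Compatibility sketch (assumes debugfs is available): because of the
 * automount set up above, older tools can keep using the debugfs path
 * while newer ones mount tracefs directly:
 *
 *	mount -t debugfs nodev /sys/kernel/debug
 *	ls /sys/kernel/debug/tracing		    # automounts tracefs here
 *	mount -t tracefs nodev /sys/kernel/tracing  # direct tracefs mount
 */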
7956
00f4b652
JL
7957extern struct trace_eval_map *__start_ftrace_eval_maps[];
7958extern struct trace_eval_map *__stop_ftrace_eval_maps[];
0c564a53 7959
5f60b351 7960static void __init trace_eval_init(void)
0c564a53 7961{
3673b8e4
SRRH
7962 int len;
7963
02fd7f68 7964 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
f57a4143 7965 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
3673b8e4
SRRH
7966}
7967
7968#ifdef CONFIG_MODULES
f57a4143 7969static void trace_module_add_evals(struct module *mod)
3673b8e4 7970{
99be647c 7971 if (!mod->num_trace_evals)
3673b8e4
SRRH
7972 return;
7973
7974 /*
 7975 * Modules with bad taint do not have events created; do
 7976 * not bother with their eval maps either.
7977 */
7978 if (trace_module_has_bad_taint(mod))
7979 return;
7980
f57a4143 7981 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
3673b8e4
SRRH
7982}
7983
681bec03 7984#ifdef CONFIG_TRACE_EVAL_MAP_FILE
f57a4143 7985static void trace_module_remove_evals(struct module *mod)
9828413d 7986{
23bf8cb8
JL
7987 union trace_eval_map_item *map;
7988 union trace_eval_map_item **last = &trace_eval_maps;
9828413d 7989
99be647c 7990 if (!mod->num_trace_evals)
9828413d
SRRH
7991 return;
7992
1793ed93 7993 mutex_lock(&trace_eval_mutex);
9828413d 7994
23bf8cb8 7995 map = trace_eval_maps;
9828413d
SRRH
7996
7997 while (map) {
7998 if (map->head.mod == mod)
7999 break;
5f60b351 8000 map = trace_eval_jmp_to_tail(map);
9828413d
SRRH
8001 last = &map->tail.next;
8002 map = map->tail.next;
8003 }
8004 if (!map)
8005 goto out;
8006
5f60b351 8007 *last = trace_eval_jmp_to_tail(map)->tail.next;
9828413d
SRRH
8008 kfree(map);
8009 out:
1793ed93 8010 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
8011}
8012#else
f57a4143 8013static inline void trace_module_remove_evals(struct module *mod) { }
681bec03 8014#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 8015
3673b8e4
SRRH
8016static int trace_module_notify(struct notifier_block *self,
8017 unsigned long val, void *data)
8018{
8019 struct module *mod = data;
8020
8021 switch (val) {
8022 case MODULE_STATE_COMING:
f57a4143 8023 trace_module_add_evals(mod);
3673b8e4 8024 break;
9828413d 8025 case MODULE_STATE_GOING:
f57a4143 8026 trace_module_remove_evals(mod);
9828413d 8027 break;
3673b8e4
SRRH
8028 }
8029
8030 return 0;
0c564a53
SRRH
8031}
8032
3673b8e4
SRRH
8033static struct notifier_block trace_module_nb = {
8034 .notifier_call = trace_module_notify,
8035 .priority = 0,
8036};
9828413d 8037#endif /* CONFIG_MODULES */
3673b8e4 8038
8434dc93 8039static __init int tracer_init_tracefs(void)
bc0c38d1
SR
8040{
8041 struct dentry *d_tracer;
bc0c38d1 8042
7e53bd42
LJ
8043 trace_access_lock_init();
8044
bc0c38d1 8045 d_tracer = tracing_init_dentry();
14a5ae40 8046 if (IS_ERR(d_tracer))
ed6f1c99 8047 return 0;
bc0c38d1 8048
8434dc93 8049 init_tracer_tracefs(&global_trace, d_tracer);
501c2375 8050 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
bc0c38d1 8051
5452af66 8052 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 8053 &global_trace, &tracing_thresh_fops);
a8259075 8054
339ae5d3 8055 trace_create_file("README", 0444, d_tracer,
5452af66
FW
8056 NULL, &tracing_readme_fops);
8057
69abe6a5
AP
8058 trace_create_file("saved_cmdlines", 0444, d_tracer,
8059 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 8060
939c7a4f
YY
8061 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8062 NULL, &tracing_saved_cmdlines_size_fops);
8063
99c621d7
MS
8064 trace_create_file("saved_tgids", 0444, d_tracer,
8065 NULL, &tracing_saved_tgids_fops);
8066
5f60b351 8067 trace_eval_init();
0c564a53 8068
f57a4143 8069 trace_create_eval_file(d_tracer);
9828413d 8070
3673b8e4
SRRH
8071#ifdef CONFIG_MODULES
8072 register_module_notifier(&trace_module_nb);
8073#endif
8074
bc0c38d1 8075#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
8076 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8077 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 8078#endif
b04cc6b1 8079
277ba044 8080 create_trace_instances(d_tracer);
5452af66 8081
37aea98b 8082 update_tracer_options(&global_trace);
09d23a1d 8083
b5ad384e 8084 return 0;
bc0c38d1
SR
8085}
8086
3f5a54e3
SR
8087static int trace_panic_handler(struct notifier_block *this,
8088 unsigned long event, void *unused)
8089{
944ac425 8090 if (ftrace_dump_on_oops)
cecbca96 8091 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
8092 return NOTIFY_OK;
8093}
8094
8095static struct notifier_block trace_panic_notifier = {
8096 .notifier_call = trace_panic_handler,
8097 .next = NULL,
8098 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8099};
8100
8101static int trace_die_handler(struct notifier_block *self,
8102 unsigned long val,
8103 void *data)
8104{
8105 switch (val) {
8106 case DIE_OOPS:
944ac425 8107 if (ftrace_dump_on_oops)
cecbca96 8108 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
8109 break;
8110 default:
8111 break;
8112 }
8113 return NOTIFY_OK;
8114}
8115
8116static struct notifier_block trace_die_notifier = {
8117 .notifier_call = trace_die_handler,
8118 .priority = 200
8119};
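/*
 * Usage sketch: the panic/die notifiers above only dump the trace buffers
 * when ftrace_dump_on_oops is set, either on the kernel command line or at
 * run time through the sysctl:
 *
 *	ftrace_dump_on_oops			   # boot parameter
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */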
8120
8121/*
 8122 * printk is set to a max of 1024; we really don't need it that big.
8123 * Nothing should be printing 1000 characters anyway.
8124 */
8125#define TRACE_MAX_PRINT 1000
8126
8127/*
8128 * Define here KERN_TRACE so that we have one place to modify
8129 * it if we decide to change what log level the ftrace dump
8130 * should be at.
8131 */
428aee14 8132#define KERN_TRACE KERN_EMERG
3f5a54e3 8133
955b61e5 8134void
3f5a54e3
SR
8135trace_printk_seq(struct trace_seq *s)
8136{
8137 /* Probably should print a warning here. */
3a161d99
SRRH
8138 if (s->seq.len >= TRACE_MAX_PRINT)
8139 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 8140
820b75f6
SRRH
8141 /*
8142 * More paranoid code. Although the buffer size is set to
8143 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8144 * an extra layer of protection.
8145 */
8146 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8147 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
8148
 8149 /* should be zero-terminated, but we are paranoid. */
3a161d99 8150 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
8151
8152 printk(KERN_TRACE "%s", s->buffer);
8153
f9520750 8154 trace_seq_init(s);
3f5a54e3
SR
8155}
8156
955b61e5
JW
8157void trace_init_global_iter(struct trace_iterator *iter)
8158{
8159 iter->tr = &global_trace;
2b6080f2 8160 iter->trace = iter->tr->current_trace;
ae3b5093 8161 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 8162 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
8163
8164 if (iter->trace && iter->trace->open)
8165 iter->trace->open(iter);
8166
8167 /* Annotate start of buffers if we had overruns */
8168 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8169 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8170
8171 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8172 if (trace_clocks[iter->tr->clock_id].in_ns)
8173 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
8174}
8175
7fe70b57 8176void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 8177{
3f5a54e3
SR
8178 /* use static because iter can be a bit big for the stack */
8179 static struct trace_iterator iter;
7fe70b57 8180 static atomic_t dump_running;
983f938a 8181 struct trace_array *tr = &global_trace;
cf586b61 8182 unsigned int old_userobj;
d769041f
SR
8183 unsigned long flags;
8184 int cnt = 0, cpu;
3f5a54e3 8185
7fe70b57
SRRH
8186 /* Only allow one dump user at a time. */
8187 if (atomic_inc_return(&dump_running) != 1) {
8188 atomic_dec(&dump_running);
8189 return;
8190 }
3f5a54e3 8191
7fe70b57
SRRH
8192 /*
8193 * Always turn off tracing when we dump.
8194 * We don't need to show trace output of what happens
8195 * between multiple crashes.
8196 *
8197 * If the user does a sysrq-z, then they can re-enable
8198 * tracing with echo 1 > tracing_on.
8199 */
0ee6b6cf 8200 tracing_off();
cf586b61 8201
7fe70b57 8202 local_irq_save(flags);
151a7331 8203 printk_nmi_direct_enter();
3f5a54e3 8204
38dbe0b1 8205 /* Simulate the iterator */
955b61e5
JW
8206 trace_init_global_iter(&iter);
8207
d769041f 8208 for_each_tracing_cpu(cpu) {
5e2d5ef8 8209 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
d769041f
SR
8210 }
8211
983f938a 8212 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
cf586b61 8213
b54d3de9 8214 /* don't look at user memory in panic mode */
983f938a 8215 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
b54d3de9 8216
cecbca96
FW
8217 switch (oops_dump_mode) {
8218 case DUMP_ALL:
ae3b5093 8219 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
8220 break;
8221 case DUMP_ORIG:
8222 iter.cpu_file = raw_smp_processor_id();
8223 break;
8224 case DUMP_NONE:
8225 goto out_enable;
8226 default:
8227 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 8228 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
8229 }
8230
8231 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 8232
7fe70b57
SRRH
8233 /* Did function tracer already get disabled? */
8234 if (ftrace_is_dead()) {
8235 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8236 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8237 }
8238
3f5a54e3
SR
8239 /*
 8240 * We need to stop all tracing on all CPUs to read
 8241 * the next buffer. This is a bit expensive, but is
 8242 * not done often. We fill in all that we can read,
8243 * and then release the locks again.
8244 */
8245
3f5a54e3
SR
8246 while (!trace_empty(&iter)) {
8247
8248 if (!cnt)
8249 printk(KERN_TRACE "---------------------------------\n");
8250
8251 cnt++;
8252
8253 /* reset all but tr, trace, and overruns */
8254 memset(&iter.seq, 0,
8255 sizeof(struct trace_iterator) -
8256 offsetof(struct trace_iterator, seq));
8257 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8258 iter.pos = -1;
8259
955b61e5 8260 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
8261 int ret;
8262
8263 ret = print_trace_line(&iter);
8264 if (ret != TRACE_TYPE_NO_CONSUME)
8265 trace_consume(&iter);
3f5a54e3 8266 }
b892e5c8 8267 touch_nmi_watchdog();
3f5a54e3
SR
8268
8269 trace_printk_seq(&iter.seq);
8270 }
8271
8272 if (!cnt)
8273 printk(KERN_TRACE " (ftrace buffer empty)\n");
8274 else
8275 printk(KERN_TRACE "---------------------------------\n");
8276
cecbca96 8277 out_enable:
983f938a 8278 tr->trace_flags |= old_userobj;
cf586b61 8279
7fe70b57
SRRH
8280 for_each_tracing_cpu(cpu) {
8281 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 8282 }
151a7331
PM
8283 atomic_dec(&dump_running);
8284 printk_nmi_direct_exit();
cd891ae0 8285 local_irq_restore(flags);
3f5a54e3 8286}
a8eecf22 8287EXPORT_SYMBOL_GPL(ftrace_dump);
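/*
 * A minimal in-kernel sketch (hypothetical caller): since ftrace_dump() is
 * exported, other code can flush the trace buffers to the console on a
 * fatal error path:
 *
 *	if (fatal_error)
 *		ftrace_dump(DUMP_ALL);
 */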
cf586b61 8288
7e465baa
TZ
8289int trace_run_command(const char *buf, int (*createfn)(int, char **))
8290{
8291 char **argv;
8292 int argc, ret;
8293
8294 argc = 0;
8295 ret = 0;
8296 argv = argv_split(GFP_KERNEL, buf, &argc);
8297 if (!argv)
8298 return -ENOMEM;
8299
8300 if (argc)
8301 ret = createfn(argc, argv);
8302
8303 argv_free(argv);
8304
8305 return ret;
8306}
8307
8308#define WRITE_BUFSIZE 4096
8309
8310ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8311 size_t count, loff_t *ppos,
8312 int (*createfn)(int, char **))
8313{
8314 char *kbuf, *buf, *tmp;
8315 int ret = 0;
8316 size_t done = 0;
8317 size_t size;
8318
8319 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8320 if (!kbuf)
8321 return -ENOMEM;
8322
8323 while (done < count) {
8324 size = count - done;
8325
8326 if (size >= WRITE_BUFSIZE)
8327 size = WRITE_BUFSIZE - 1;
8328
8329 if (copy_from_user(kbuf, buffer + done, size)) {
8330 ret = -EFAULT;
8331 goto out;
8332 }
8333 kbuf[size] = '\0';
8334 buf = kbuf;
8335 do {
8336 tmp = strchr(buf, '\n');
8337 if (tmp) {
8338 *tmp = '\0';
8339 size = tmp - buf + 1;
8340 } else {
8341 size = strlen(buf);
8342 if (done + size < count) {
8343 if (buf != kbuf)
8344 break;
8345 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
8346 pr_warn("Line length is too long: Should be less than %d\n",
8347 WRITE_BUFSIZE - 2);
8348 ret = -EINVAL;
8349 goto out;
8350 }
8351 }
8352 done += size;
8353
8354 /* Remove comments */
8355 tmp = strchr(buf, '#');
8356
8357 if (tmp)
8358 *tmp = '\0';
8359
8360 ret = trace_run_command(buf, createfn);
8361 if (ret)
8362 goto out;
8363 buf += size;
8364
8365 } while (done < count);
8366 }
8367 ret = done;
8368
8369out:
8370 kfree(kbuf);
8371
8372 return ret;
8373}
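/*
 * Usage sketch (assumes tracefs mounted at /sys/kernel/tracing):
 * trace_parse_run_command() backs the dynamic-event control files, which
 * accept one definition per line and '#' comments, e.g. for kprobe events:
 *
 *	echo 'p:myprobe do_sys_open' > /sys/kernel/tracing/kprobe_events
 *	echo '-:myprobe'             > /sys/kernel/tracing/kprobe_events
 */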
8374
3928a8a2 8375__init static int tracer_alloc_buffers(void)
bc0c38d1 8376{
73c5162a 8377 int ring_buf_size;
9e01c1b7 8378 int ret = -ENOMEM;
4c11d7ae 8379
b5e87c05
SRRH
8380 /*
 8381 * Make sure we don't accidentally add more trace options
8382 * than we have bits for.
8383 */
9a38a885 8384 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
b5e87c05 8385
9e01c1b7
RR
8386 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8387 goto out;
8388
ccfe9e42 8389 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 8390 goto out_free_buffer_mask;
4c11d7ae 8391
07d777fe
SR
8392 /* Only allocate trace_printk buffers if a trace_printk exists */
8393 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 8394 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
8395 trace_printk_init_buffers();
8396
73c5162a
SR
8397 /* To save memory, keep the ring buffer size to its minimum */
8398 if (ring_buffer_expanded)
8399 ring_buf_size = trace_buf_size;
8400 else
8401 ring_buf_size = 1;
8402
9e01c1b7 8403 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 8404 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 8405
2b6080f2
SR
8406 raw_spin_lock_init(&global_trace.start_lock);
8407
b32614c0
SAS
8408 /*
 8409 * The prepare callback allocates some memory for the ring buffer. We
 8410 * don't free the buffer if the CPU goes down. If we were to free
8411 * the buffer, then the user would lose any trace that was in the
8412 * buffer. The memory will be removed once the "instance" is removed.
8413 */
8414 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8415 "trace/RB:preapre", trace_rb_cpu_prepare,
8416 NULL);
8417 if (ret < 0)
8418 goto out_free_cpumask;
2c4a33ab 8419 /* Used for event triggers */
147d88e0 8420 ret = -ENOMEM;
2c4a33ab
SRRH
8421 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8422 if (!temp_buffer)
b32614c0 8423 goto out_rm_hp_state;
2c4a33ab 8424
939c7a4f
YY
8425 if (trace_create_savedcmd() < 0)
8426 goto out_free_temp_buffer;
8427
9e01c1b7 8428 /* TODO: make the number of buffers hot pluggable with CPUS */
737223fb 8429 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
8430 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8431 WARN_ON(1);
939c7a4f 8432 goto out_free_savedcmd;
4c11d7ae 8433 }
a7603ff4 8434
499e5470
SR
8435 if (global_trace.buffer_disabled)
8436 tracing_off();
4c11d7ae 8437
e1e232ca
SR
8438 if (trace_boot_clock) {
8439 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8440 if (ret < 0)
a395d6a7
JP
8441 pr_warn("Trace clock %s not defined, going back to default\n",
8442 trace_boot_clock);
e1e232ca
SR
8443 }
8444
ca164318
SRRH
8445 /*
8446 * register_tracer() might reference current_trace, so it
8447 * needs to be set before we register anything. This is
8448 * just a bootstrap of current_trace anyway.
8449 */
2b6080f2
SR
8450 global_trace.current_trace = &nop_trace;
8451
0b9b12c1
SRRH
8452 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8453
4104d326
SRRH
8454 ftrace_init_global_array_ops(&global_trace);
8455
9a38a885
SRRH
8456 init_trace_flags_index(&global_trace);
8457
ca164318
SRRH
8458 register_tracer(&nop_trace);
8459
dbeafd0d
SRV
8460 /* Function tracing may start here (via kernel command line) */
8461 init_function_trace();
8462
60a11774
SR
8463 /* All seems OK, enable tracing */
8464 tracing_disabled = 0;
3928a8a2 8465
3f5a54e3
SR
8466 atomic_notifier_chain_register(&panic_notifier_list,
8467 &trace_panic_notifier);
8468
8469 register_die_notifier(&trace_die_notifier);
2fc1dfbe 8470
ae63b31e
SR
8471 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8472
8473 INIT_LIST_HEAD(&global_trace.systems);
8474 INIT_LIST_HEAD(&global_trace.events);
8475 list_add(&global_trace.list, &ftrace_trace_arrays);
8476
a4d1e688 8477 apply_trace_boot_options();
7bcfaf54 8478
77fd5c15
SRRH
8479 register_snapshot_cmd();
8480
2fc1dfbe 8481 return 0;
3f5a54e3 8482
939c7a4f
YY
8483out_free_savedcmd:
8484 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
8485out_free_temp_buffer:
8486 ring_buffer_free(temp_buffer);
b32614c0
SAS
8487out_rm_hp_state:
8488 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9e01c1b7 8489out_free_cpumask:
ccfe9e42 8490 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
8491out_free_buffer_mask:
8492 free_cpumask_var(tracing_buffer_mask);
8493out:
8494 return ret;
bc0c38d1 8495}
b2821ae6 8496
e725c731 8497void __init early_trace_init(void)
5f893b26 8498{
0daa2302
SRRH
8499 if (tracepoint_printk) {
8500 tracepoint_print_iter =
8501 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8502 if (WARN_ON(!tracepoint_print_iter))
8503 tracepoint_printk = 0;
42391745
SRRH
8504 else
8505 static_key_enable(&tracepoint_printk_key.key);
0daa2302 8506 }
5f893b26 8507 tracer_alloc_buffers();
e725c731
SRV
8508}
8509
8510void __init trace_init(void)
8511{
0c564a53 8512 trace_event_init();
5f893b26
SRRH
8513}
8514
b2821ae6
SR
8515__init static int clear_boot_tracer(void)
8516{
8517 /*
 8518 * The default bootup tracer name points into an init-section buffer.
 8519 * This function is called at late_initcall time. If we did not
8520 * find the boot tracer, then clear it out, to prevent
8521 * later registration from accessing the buffer that is
8522 * about to be freed.
8523 */
8524 if (!default_bootup_tracer)
8525 return 0;
8526
8527 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8528 default_bootup_tracer);
8529 default_bootup_tracer = NULL;
8530
8531 return 0;
8532}
8533
8434dc93 8534fs_initcall(tracer_init_tracefs);
4bb0f0e7 8535late_initcall_sync(clear_boot_tracer);