kernel/trace/trace.c
bc0c38d1
SR
1/*
2 * ring buffer based function tracer
3 *
2b6080f2 4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
bc0c38d1
SR
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
6d49e352 12 * Copyright (C) 2004 Nadia Yvette Chambers
bc0c38d1 13 */
2cadf913 14#include <linux/ring_buffer.h>
273b281f 15#include <generated/utsrelease.h>
2cadf913
SR
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
bc0c38d1
SR
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
3f5a54e3 20#include <linux/notifier.h>
2cadf913 21#include <linux/irqflags.h>
bc0c38d1 22#include <linux/debugfs.h>
8434dc93 23#include <linux/tracefs.h>
4c11d7ae 24#include <linux/pagemap.h>
bc0c38d1
SR
25#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
76c813e2 28#include <linux/vmalloc.h>
bc0c38d1
SR
29#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
2cadf913 32#include <linux/splice.h>
3f5a54e3 33#include <linux/kdebug.h>
5f0c6c03 34#include <linux/string.h>
f76180bc 35#include <linux/mount.h>
7e53bd42 36#include <linux/rwsem.h>
5a0e3ad6 37#include <linux/slab.h>
bc0c38d1
SR
38#include <linux/ctype.h>
39#include <linux/init.h>
2a2cc8f7 40#include <linux/poll.h>
b892e5c8 41#include <linux/nmi.h>
bc0c38d1 42#include <linux/fs.h>
478409dd 43#include <linux/trace.h>
8bd75c77 44#include <linux/sched/rt.h>
86387f7e 45
bc0c38d1 46#include "trace.h"
f0868d1e 47#include "trace_output.h"
bc0c38d1 48
73c5162a
SR
49/*
50 * On boot up, the ring buffer is set to the minimum size, so that
51 * we do not waste memory on systems that are not using tracing.
52 */
55034cd6 53bool ring_buffer_expanded;
73c5162a 54
8e1b82e0
FW
55/*
56 * We need to change this state when a selftest is running.
ff32504f
FW
 57 * A selftest will look into the ring-buffer to count the
58 * entries inserted during the selftest although some concurrent
5e1607a0 59 * insertions into the ring-buffer such as trace_printk could occur
ff32504f
FW
60 * at the same time, giving false positive or negative results.
61 */
8e1b82e0 62static bool __read_mostly tracing_selftest_running;
ff32504f 63
b2821ae6
SR
64/*
65 * If a tracer is running, we do not want to run SELFTEST.
66 */
020e5f85 67bool __read_mostly tracing_selftest_disabled;
b2821ae6 68
0daa2302
SRRH
69/* Pipe tracepoints to printk */
70struct trace_iterator *tracepoint_print_iter;
71int tracepoint_printk;
42391745 72static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
0daa2302 73
adf9f195
FW
74/* For tracers that don't implement custom flags */
75static struct tracer_opt dummy_tracer_opt[] = {
76 { }
77};
78
8c1a49ae
SRRH
79static int
80dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
adf9f195
FW
81{
82 return 0;
83}
0f048701 84
7ffbd48d
SR
85/*
86 * To prevent the comm cache from being overwritten when no
87 * tracing is active, only save the comm when a trace event
88 * occurred.
89 */
d914ba37 90static DEFINE_PER_CPU(bool, trace_taskinfo_save);
7ffbd48d 91
0f048701
SR
92/*
93 * Kill all tracing for good (never come back).
94 * It is initialized to 1 but will turn to zero if the initialization
95 * of the tracer is successful. But that is the only place that sets
96 * this back to zero.
97 */
4fd27358 98static int tracing_disabled = 1;
0f048701 99
955b61e5 100cpumask_var_t __read_mostly tracing_buffer_mask;
ab46428c 101
944ac425
SR
102/*
103 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
104 *
105 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
106 * is set, then ftrace_dump is called. This will output the contents
107 * of the ftrace buffers to the console. This is very useful for
 108 * capturing traces that lead to crashes and outputting them to a
109 * serial console.
110 *
 111 * It is off by default, but you can enable it either by specifying
 112 * "ftrace_dump_on_oops" on the kernel command line, or by setting
cecbca96
FW
113 * /proc/sys/kernel/ftrace_dump_on_oops
114 * Set 1 if you want to dump buffers of all CPUs
115 * Set 2 if you want to dump the buffer of the CPU that triggered oops
944ac425 116 */
cecbca96
FW
117
118enum ftrace_dump_mode ftrace_dump_on_oops;
944ac425 119
de7edd31
SRRH
120/* When set, tracing will stop when a WARN*() is hit */
121int __disable_trace_on_warning;
122
681bec03
JL
123#ifdef CONFIG_TRACE_EVAL_MAP_FILE
124/* Map of enums to their values, for "eval_map" file */
23bf8cb8 125struct trace_eval_map_head {
9828413d
SRRH
126 struct module *mod;
127 unsigned long length;
128};
129
23bf8cb8 130union trace_eval_map_item;
9828413d 131
23bf8cb8 132struct trace_eval_map_tail {
9828413d
SRRH
133 /*
134 * "end" is first and points to NULL as it must be different
00f4b652 135 * than "mod" or "eval_string"
9828413d 136 */
23bf8cb8 137 union trace_eval_map_item *next;
9828413d
SRRH
138 const char *end; /* points to NULL */
139};
140
1793ed93 141static DEFINE_MUTEX(trace_eval_mutex);
9828413d
SRRH
142
143/*
23bf8cb8 144 * The trace_eval_maps are saved in an array with two extra elements,
9828413d
SRRH
145 * one at the beginning, and one at the end. The beginning item contains
146 * the count of the saved maps (head.length), and the module they
147 * belong to if not built in (head.mod). The ending item contains a
681bec03 148 * pointer to the next array of saved eval_map items.
9828413d 149 */
23bf8cb8 150union trace_eval_map_item {
00f4b652 151 struct trace_eval_map map;
23bf8cb8
JL
152 struct trace_eval_map_head head;
153 struct trace_eval_map_tail tail;
9828413d
SRRH
154};
155
23bf8cb8 156static union trace_eval_map_item *trace_eval_maps;
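/*
 * Illustrative sketch of the layout described above: for N maps saved
 * from one module, the array reached through trace_eval_maps looks like
 *
 *   item[0]     head  (head.mod, head.length == N)
 *   item[1]     map   (first trace_eval_map)
 *   ...
 *   item[N]     map   (last trace_eval_map)
 *   item[N+1]   tail  (tail.next -> next saved array, or NULL at the end)
 */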
681bec03 157#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 158
607e2ea1 159static int tracing_set_tracer(struct trace_array *tr, const char *buf);
b2821ae6 160
ee6c2c1b
LZ
161#define MAX_TRACER_SIZE 100
162static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
b2821ae6 163static char *default_bootup_tracer;
d9e54076 164
55034cd6
SRRH
165static bool allocate_snapshot;
166
1beee96b 167static int __init set_cmdline_ftrace(char *str)
d9e54076 168{
67012ab1 169 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
b2821ae6 170 default_bootup_tracer = bootup_tracer_buf;
73c5162a 171 /* We are using ftrace early, expand it */
55034cd6 172 ring_buffer_expanded = true;
d9e54076
PZ
173 return 1;
174}
1beee96b 175__setup("ftrace=", set_cmdline_ftrace);
d9e54076 176
944ac425
SR
177static int __init set_ftrace_dump_on_oops(char *str)
178{
cecbca96
FW
179 if (*str++ != '=' || !*str) {
180 ftrace_dump_on_oops = DUMP_ALL;
181 return 1;
182 }
183
184 if (!strcmp("orig_cpu", str)) {
185 ftrace_dump_on_oops = DUMP_ORIG;
186 return 1;
187 }
188
189 return 0;
944ac425
SR
190}
191__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
60a11774 192
de7edd31
SRRH
193static int __init stop_trace_on_warning(char *str)
194{
933ff9f2
LCG
195 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
196 __disable_trace_on_warning = 1;
de7edd31
SRRH
197 return 1;
198}
933ff9f2 199__setup("traceoff_on_warning", stop_trace_on_warning);
de7edd31 200
3209cff4 201static int __init boot_alloc_snapshot(char *str)
55034cd6
SRRH
202{
203 allocate_snapshot = true;
204 /* We also need the main ring buffer expanded */
205 ring_buffer_expanded = true;
206 return 1;
207}
3209cff4 208__setup("alloc_snapshot", boot_alloc_snapshot);
55034cd6 209
7bcfaf54
SR
210
211static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
7bcfaf54
SR
212
213static int __init set_trace_boot_options(char *str)
214{
67012ab1 215 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
7bcfaf54
SR
216 return 0;
217}
218__setup("trace_options=", set_trace_boot_options);
219
e1e232ca
SR
220static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
221static char *trace_boot_clock __initdata;
222
223static int __init set_trace_boot_clock(char *str)
224{
225 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
226 trace_boot_clock = trace_boot_clock_buf;
227 return 0;
228}
229__setup("trace_clock=", set_trace_boot_clock);
230
0daa2302
SRRH
231static int __init set_tracepoint_printk(char *str)
232{
233 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
234 tracepoint_printk = 1;
235 return 1;
236}
237__setup("tp_printk", set_tracepoint_printk);
de7edd31 238
a5a1d1c2 239unsigned long long ns2usecs(u64 nsec)
bc0c38d1
SR
240{
241 nsec += 500;
242 do_div(nsec, 1000);
243 return nsec;
244}
245
983f938a
SRRH
246/* trace_flags holds trace_options default values */
247#define TRACE_DEFAULT_FLAGS \
248 (FUNCTION_DEFAULT_FLAGS | \
249 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
250 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
251 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
252 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
253
16270145
SRRH
254/* trace_options that are only supported by global_trace */
255#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
256 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
257
20550622
SRRH
258/* trace_flags that are default zero for instances */
259#define ZEROED_TRACE_FLAGS \
1e10486f 260 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
16270145 261
4fcdae83 262/*
67d04bb2
JF
263 * The global_trace is the descriptor that holds the top-level tracing
264 * buffers for the live tracing.
4fcdae83 265 */
983f938a
SRRH
266static struct trace_array global_trace = {
267 .trace_flags = TRACE_DEFAULT_FLAGS,
268};
bc0c38d1 269
ae63b31e 270LIST_HEAD(ftrace_trace_arrays);
bc0c38d1 271
ff451961
SRRH
272int trace_array_get(struct trace_array *this_tr)
273{
274 struct trace_array *tr;
275 int ret = -ENODEV;
276
277 mutex_lock(&trace_types_lock);
278 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
279 if (tr == this_tr) {
280 tr->ref++;
281 ret = 0;
282 break;
283 }
284 }
285 mutex_unlock(&trace_types_lock);
286
287 return ret;
288}
289
290static void __trace_array_put(struct trace_array *this_tr)
291{
292 WARN_ON(!this_tr->ref);
293 this_tr->ref--;
294}
295
296void trace_array_put(struct trace_array *this_tr)
297{
298 mutex_lock(&trace_types_lock);
299 __trace_array_put(this_tr);
300 mutex_unlock(&trace_types_lock);
301}
302
2425bcb9 303int call_filter_check_discard(struct trace_event_call *call, void *rec,
f306cc82
TZ
304 struct ring_buffer *buffer,
305 struct ring_buffer_event *event)
306{
307 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
308 !filter_match_preds(call->filter, rec)) {
0fc1b09f 309 __trace_event_discard_commit(buffer, event);
f306cc82
TZ
310 return 1;
311 }
312
313 return 0;
eb02ce01
TZ
314}
315
76c813e2
SRRH
316void trace_free_pid_list(struct trace_pid_list *pid_list)
317{
318 vfree(pid_list->pids);
319 kfree(pid_list);
320}
321
d8275c45
SR
322/**
323 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
324 * @filtered_pids: The list of pids to check
325 * @search_pid: The PID to find in @filtered_pids
326 *
 327 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
328 */
329bool
330trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
331{
332 /*
333 * If pid_max changed after filtered_pids was created, we
334 * by default ignore all pids greater than the previous pid_max.
335 */
336 if (search_pid >= filtered_pids->pid_max)
337 return false;
338
339 return test_bit(search_pid, filtered_pids->pids);
340}
341
342/**
343 * trace_ignore_this_task - should a task be ignored for tracing
344 * @filtered_pids: The list of pids to check
345 * @task: The task that should be ignored if not filtered
346 *
 347 * Checks @filtered_pids to see whether @task should be traced or not.
348 * Returns true if @task should *NOT* be traced.
349 * Returns false if @task should be traced.
350 */
351bool
352trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
353{
354 /*
355 * Return false, because if filtered_pids does not exist,
356 * all pids are good to trace.
357 */
358 if (!filtered_pids)
359 return false;
360
361 return !trace_find_filtered_pid(filtered_pids, task->pid);
362}
363
364/**
5a93bae2 365 * trace_filter_add_remove_task - Add or remove a task from a pid_list
d8275c45
SR
366 * @pid_list: The list to modify
367 * @self: The current task for fork or NULL for exit
368 * @task: The task to add or remove
369 *
370 * If adding a task, if @self is defined, the task is only added if @self
371 * is also included in @pid_list. This happens on fork and tasks should
372 * only be added when the parent is listed. If @self is NULL, then the
373 * @task pid will be removed from the list, which would happen on exit
374 * of a task.
375 */
376void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
377 struct task_struct *self,
378 struct task_struct *task)
379{
380 if (!pid_list)
381 return;
382
383 /* For forks, we only add if the forking task is listed */
384 if (self) {
385 if (!trace_find_filtered_pid(pid_list, self->pid))
386 return;
387 }
388
389 /* Sorry, but we don't support pid_max changing after setting */
390 if (task->pid >= pid_list->pid_max)
391 return;
392
393 /* "self" is set for forks, and NULL for exits */
394 if (self)
395 set_bit(task->pid, pid_list->pids);
396 else
397 clear_bit(task->pid, pid_list->pids);
398}
399
5cc8976b
SRRH
400/**
401 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
402 * @pid_list: The pid list to show
 403 * @v: The last pid that was shown (the actual pid plus 1, so that zero can be displayed)
404 * @pos: The position of the file
405 *
406 * This is used by the seq_file "next" operation to iterate the pids
407 * listed in a trace_pid_list structure.
408 *
409 * Returns the pid+1 as we want to display pid of zero, but NULL would
410 * stop the iteration.
411 */
412void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
413{
414 unsigned long pid = (unsigned long)v;
415
416 (*pos)++;
417
 418 /* pid already is +1 of the actual previous bit */
419 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
420
421 /* Return pid + 1 to allow zero to be represented */
422 if (pid < pid_list->pid_max)
423 return (void *)(pid + 1);
424
425 return NULL;
426}
427
428/**
429 * trace_pid_start - Used for seq_file to start reading pid lists
430 * @pid_list: The pid list to show
431 * @pos: The position of the file
432 *
433 * This is used by seq_file "start" operation to start the iteration
434 * of listing pids.
435 *
436 * Returns the pid+1 as we want to display pid of zero, but NULL would
437 * stop the iteration.
438 */
439void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
440{
441 unsigned long pid;
442 loff_t l = 0;
443
444 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
445 if (pid >= pid_list->pid_max)
446 return NULL;
447
448 /* Return pid + 1 so that zero can be the exit value */
449 for (pid++; pid && l < *pos;
450 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
451 ;
452 return (void *)pid;
453}
454
455/**
456 * trace_pid_show - show the current pid in seq_file processing
457 * @m: The seq_file structure to write into
458 * @v: A void pointer of the pid (+1) value to display
459 *
460 * Can be directly used by seq_file operations to display the current
461 * pid value.
462 */
463int trace_pid_show(struct seq_file *m, void *v)
464{
465 unsigned long pid = (unsigned long)v - 1;
466
467 seq_printf(m, "%lu\n", pid);
468 return 0;
469}
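/*
 * Sketch of how the pid seq_file helpers above are typically wired up.
 * The names p_start/p_next/p_stop and some_pid_list are hypothetical;
 * the real users live in the event and function tracing code.
 *
 *   static void *p_start(struct seq_file *m, loff_t *pos)
 *   {
 *           return trace_pid_start(some_pid_list, pos);
 *   }
 *
 *   static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *   {
 *           return trace_pid_next(some_pid_list, v, pos);
 *   }
 *
 *   static const struct seq_operations p_seq_ops = {
 *           .start = p_start,
 *           .next  = p_next,
 *           .stop  = p_stop,
 *           .show  = trace_pid_show,
 *   };
 */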
470
76c813e2
SRRH
471/* 128 should be much more than enough */
472#define PID_BUF_SIZE 127
473
474int trace_pid_write(struct trace_pid_list *filtered_pids,
475 struct trace_pid_list **new_pid_list,
476 const char __user *ubuf, size_t cnt)
477{
478 struct trace_pid_list *pid_list;
479 struct trace_parser parser;
480 unsigned long val;
481 int nr_pids = 0;
482 ssize_t read = 0;
483 ssize_t ret = 0;
484 loff_t pos;
485 pid_t pid;
486
487 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
488 return -ENOMEM;
489
490 /*
491 * Always recreate a new array. The write is an all or nothing
492 * operation. Always create a new array when adding new pids by
493 * the user. If the operation fails, then the current list is
494 * not modified.
495 */
496 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
497 if (!pid_list)
498 return -ENOMEM;
499
500 pid_list->pid_max = READ_ONCE(pid_max);
501
502 /* Only truncating will shrink pid_max */
503 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
504 pid_list->pid_max = filtered_pids->pid_max;
505
506 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
507 if (!pid_list->pids) {
508 kfree(pid_list);
509 return -ENOMEM;
510 }
511
512 if (filtered_pids) {
513 /* copy the current bits to the new max */
67f20b08
WY
514 for_each_set_bit(pid, filtered_pids->pids,
515 filtered_pids->pid_max) {
76c813e2 516 set_bit(pid, pid_list->pids);
76c813e2
SRRH
517 nr_pids++;
518 }
519 }
520
521 while (cnt > 0) {
522
523 pos = 0;
524
525 ret = trace_get_user(&parser, ubuf, cnt, &pos);
526 if (ret < 0 || !trace_parser_loaded(&parser))
527 break;
528
529 read += ret;
530 ubuf += ret;
531 cnt -= ret;
532
76c813e2
SRRH
533 ret = -EINVAL;
534 if (kstrtoul(parser.buffer, 0, &val))
535 break;
536 if (val >= pid_list->pid_max)
537 break;
538
539 pid = (pid_t)val;
540
541 set_bit(pid, pid_list->pids);
542 nr_pids++;
543
544 trace_parser_clear(&parser);
545 ret = 0;
546 }
547 trace_parser_put(&parser);
548
549 if (ret < 0) {
550 trace_free_pid_list(pid_list);
551 return ret;
552 }
553
554 if (!nr_pids) {
555 /* Cleared the list of pids */
556 trace_free_pid_list(pid_list);
557 read = ret;
558 pid_list = NULL;
559 }
560
561 *new_pid_list = pid_list;
562
563 return read;
564}
565
a5a1d1c2 566static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
37886f6a
SR
567{
568 u64 ts;
569
570 /* Early boot up does not have a buffer yet */
9457158b 571 if (!buf->buffer)
37886f6a
SR
572 return trace_clock_local();
573
9457158b
AL
574 ts = ring_buffer_time_stamp(buf->buffer, cpu);
575 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
37886f6a
SR
576
577 return ts;
578}
bc0c38d1 579
a5a1d1c2 580u64 ftrace_now(int cpu)
9457158b
AL
581{
582 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
583}
584
10246fa3
SRRH
585/**
586 * tracing_is_enabled - Show if global_trace has been disabled
587 *
588 * Shows if the global trace has been enabled or not. It uses the
 589 * mirror flag "buffer_disabled" so it can be used in fast paths such as for
590 * the irqsoff tracer. But it may be inaccurate due to races. If you
591 * need to know the accurate state, use tracing_is_on() which is a little
592 * slower, but accurate.
593 */
9036990d
SR
594int tracing_is_enabled(void)
595{
10246fa3
SRRH
596 /*
597 * For quick access (irqsoff uses this in fast path), just
598 * return the mirror variable of the state of the ring buffer.
599 * It's a little racy, but we don't really care.
600 */
601 smp_rmb();
602 return !global_trace.buffer_disabled;
9036990d
SR
603}
604
4fcdae83 605/*
3928a8a2
SR
606 * trace_buf_size is the size in bytes that is allocated
607 * for a buffer. Note, the number of bytes is always rounded
608 * to page size.
3f5a54e3
SR
609 *
610 * This number is purposely set to a low number of 16384.
611 * If the dump on oops happens, it will be much appreciated
612 * to not have to wait for all that output. Anyway this can be
613 * boot time and run time configurable.
4fcdae83 614 */
3928a8a2 615#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
3f5a54e3 616
3928a8a2 617static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
bc0c38d1 618
4fcdae83 619/* trace_types holds a link list of available tracers. */
bc0c38d1 620static struct tracer *trace_types __read_mostly;
4fcdae83 621
4fcdae83
SR
622/*
623 * trace_types_lock is used to protect the trace_types list.
4fcdae83 624 */
a8227415 625DEFINE_MUTEX(trace_types_lock);
4fcdae83 626
7e53bd42
LJ
627/*
628 * serialize the access of the ring buffer
629 *
630 * ring buffer serializes readers, but it is low level protection.
 631 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 632 * is not protected by the ring buffer.
633 *
 634 * The content of events may become garbage if we allow other processes to consume
635 * these events concurrently:
636 * A) the page of the consumed events may become a normal page
 637 * (not reader page) in the ring buffer, and this page will be rewritten
 638 * by the events producer.
639 * B) The page of the consumed events may become a page for splice_read,
 640 * and this page will be returned to the system.
641 *
 642 * These primitives allow multi-process access to different cpu ring buffers
643 * concurrently.
644 *
 645 * These primitives don't distinguish between read-only and read-consume access.
 646 * Multiple read-only accesses are also serialized.
647 */
648
649#ifdef CONFIG_SMP
650static DECLARE_RWSEM(all_cpu_access_lock);
651static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
652
653static inline void trace_access_lock(int cpu)
654{
ae3b5093 655 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
656 /* gain it for accessing the whole ring buffer. */
657 down_write(&all_cpu_access_lock);
658 } else {
659 /* gain it for accessing a cpu ring buffer. */
660
ae3b5093 661 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
7e53bd42
LJ
662 down_read(&all_cpu_access_lock);
663
664 /* Secondly block other access to this @cpu ring buffer. */
665 mutex_lock(&per_cpu(cpu_access_lock, cpu));
666 }
667}
668
669static inline void trace_access_unlock(int cpu)
670{
ae3b5093 671 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
672 up_write(&all_cpu_access_lock);
673 } else {
674 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
675 up_read(&all_cpu_access_lock);
676 }
677}
678
679static inline void trace_access_lock_init(void)
680{
681 int cpu;
682
683 for_each_possible_cpu(cpu)
684 mutex_init(&per_cpu(cpu_access_lock, cpu));
685}
686
687#else
688
689static DEFINE_MUTEX(access_lock);
690
691static inline void trace_access_lock(int cpu)
692{
693 (void)cpu;
694 mutex_lock(&access_lock);
695}
696
697static inline void trace_access_unlock(int cpu)
698{
699 (void)cpu;
700 mutex_unlock(&access_lock);
701}
702
703static inline void trace_access_lock_init(void)
704{
705}
706
707#endif
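/*
 * Typical usage of the primitives above (illustrative): a reader wraps
 * the consumption of events for one cpu (or RING_BUFFER_ALL_CPUS) in the
 * access lock so pages cannot be recycled underneath it:
 *
 *   trace_access_lock(iter->cpu_file);
 *   ... consume events for iter->cpu_file ...
 *   trace_access_unlock(iter->cpu_file);
 */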
708
d78a4614
SRRH
709#ifdef CONFIG_STACKTRACE
710static void __ftrace_trace_stack(struct ring_buffer *buffer,
711 unsigned long flags,
712 int skip, int pc, struct pt_regs *regs);
2d34f489
SRRH
713static inline void ftrace_trace_stack(struct trace_array *tr,
714 struct ring_buffer *buffer,
73dddbb5
SRRH
715 unsigned long flags,
716 int skip, int pc, struct pt_regs *regs);
ca475e83 717
d78a4614
SRRH
718#else
719static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
720 unsigned long flags,
721 int skip, int pc, struct pt_regs *regs)
722{
723}
2d34f489
SRRH
724static inline void ftrace_trace_stack(struct trace_array *tr,
725 struct ring_buffer *buffer,
73dddbb5
SRRH
726 unsigned long flags,
727 int skip, int pc, struct pt_regs *regs)
ca475e83
SRRH
728{
729}
730
d78a4614
SRRH
731#endif
732
3e9a8aad
SRRH
733static __always_inline void
734trace_event_setup(struct ring_buffer_event *event,
735 int type, unsigned long flags, int pc)
736{
737 struct trace_entry *ent = ring_buffer_event_data(event);
738
739 tracing_generic_entry_update(ent, flags, pc);
740 ent->type = type;
741}
742
743static __always_inline struct ring_buffer_event *
744__trace_buffer_lock_reserve(struct ring_buffer *buffer,
745 int type,
746 unsigned long len,
747 unsigned long flags, int pc)
748{
749 struct ring_buffer_event *event;
750
751 event = ring_buffer_lock_reserve(buffer, len);
752 if (event != NULL)
753 trace_event_setup(event, type, flags, pc);
754
755 return event;
756}
757
2290f2c5 758void tracer_tracing_on(struct trace_array *tr)
10246fa3
SRRH
759{
760 if (tr->trace_buffer.buffer)
761 ring_buffer_record_on(tr->trace_buffer.buffer);
762 /*
763 * This flag is looked at when buffers haven't been allocated
764 * yet, or by some tracers (like irqsoff), that just want to
765 * know if the ring buffer has been disabled, but it can handle
 766 * races where it gets disabled but we still do a record.
767 * As the check is in the fast path of the tracers, it is more
768 * important to be fast than accurate.
769 */
770 tr->buffer_disabled = 0;
771 /* Make the flag seen by readers */
772 smp_wmb();
773}
774
499e5470
SR
775/**
776 * tracing_on - enable tracing buffers
777 *
778 * This function enables tracing buffers that may have been
779 * disabled with tracing_off.
780 */
781void tracing_on(void)
782{
10246fa3 783 tracer_tracing_on(&global_trace);
499e5470
SR
784}
785EXPORT_SYMBOL_GPL(tracing_on);
786
52ffabe3
SRRH
787
788static __always_inline void
789__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
790{
d914ba37 791 __this_cpu_write(trace_taskinfo_save, true);
52ffabe3
SRRH
792
793 /* If this is the temp buffer, we need to commit fully */
794 if (this_cpu_read(trace_buffered_event) == event) {
795 /* Length is in event->array[0] */
796 ring_buffer_write(buffer, event->array[0], &event->array[1]);
797 /* Release the temp buffer */
798 this_cpu_dec(trace_buffered_event_cnt);
799 } else
800 ring_buffer_unlock_commit(buffer, event);
801}
802
09ae7234
SRRH
803/**
804 * __trace_puts - write a constant string into the trace buffer.
805 * @ip: The address of the caller
806 * @str: The constant string to write
807 * @size: The size of the string.
808 */
809int __trace_puts(unsigned long ip, const char *str, int size)
810{
811 struct ring_buffer_event *event;
812 struct ring_buffer *buffer;
813 struct print_entry *entry;
814 unsigned long irq_flags;
815 int alloc;
8abfb872
J
816 int pc;
817
983f938a 818 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
819 return 0;
820
8abfb872 821 pc = preempt_count();
09ae7234 822
3132e107
SRRH
823 if (unlikely(tracing_selftest_running || tracing_disabled))
824 return 0;
825
09ae7234
SRRH
826 alloc = sizeof(*entry) + size + 2; /* possible \n added */
827
828 local_save_flags(irq_flags);
829 buffer = global_trace.trace_buffer.buffer;
3e9a8aad
SRRH
830 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
831 irq_flags, pc);
09ae7234
SRRH
832 if (!event)
833 return 0;
834
835 entry = ring_buffer_event_data(event);
836 entry->ip = ip;
837
838 memcpy(&entry->buf, str, size);
839
840 /* Add a newline if necessary */
841 if (entry->buf[size - 1] != '\n') {
842 entry->buf[size] = '\n';
843 entry->buf[size + 1] = '\0';
844 } else
845 entry->buf[size] = '\0';
846
847 __buffer_unlock_commit(buffer, event);
2d34f489 848 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
849
850 return size;
851}
852EXPORT_SYMBOL_GPL(__trace_puts);
853
854/**
855 * __trace_bputs - write the pointer to a constant string into trace buffer
856 * @ip: The address of the caller
 857 * @str: The constant string to write to the buffer
858 */
859int __trace_bputs(unsigned long ip, const char *str)
860{
861 struct ring_buffer_event *event;
862 struct ring_buffer *buffer;
863 struct bputs_entry *entry;
864 unsigned long irq_flags;
865 int size = sizeof(struct bputs_entry);
8abfb872
J
866 int pc;
867
983f938a 868 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
869 return 0;
870
8abfb872 871 pc = preempt_count();
09ae7234 872
3132e107
SRRH
873 if (unlikely(tracing_selftest_running || tracing_disabled))
874 return 0;
875
09ae7234
SRRH
876 local_save_flags(irq_flags);
877 buffer = global_trace.trace_buffer.buffer;
3e9a8aad
SRRH
878 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
879 irq_flags, pc);
09ae7234
SRRH
880 if (!event)
881 return 0;
882
883 entry = ring_buffer_event_data(event);
884 entry->ip = ip;
885 entry->str = str;
886
887 __buffer_unlock_commit(buffer, event);
2d34f489 888 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
889
890 return 1;
891}
892EXPORT_SYMBOL_GPL(__trace_bputs);
893
ad909e21 894#ifdef CONFIG_TRACER_SNAPSHOT
cab50379 895static void tracing_snapshot_instance(struct trace_array *tr)
ad909e21 896{
ad909e21
SRRH
897 struct tracer *tracer = tr->current_trace;
898 unsigned long flags;
899
1b22e382
SRRH
900 if (in_nmi()) {
901 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
902 internal_trace_puts("*** snapshot is being ignored ***\n");
903 return;
904 }
905
ad909e21 906 if (!tr->allocated_snapshot) {
ca268da6
SRRH
907 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
908 internal_trace_puts("*** stopping trace here! ***\n");
ad909e21
SRRH
909 tracing_off();
910 return;
911 }
912
913 /* Note, snapshot can not be used when the tracer uses it */
914 if (tracer->use_max_tr) {
ca268da6
SRRH
915 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
916 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
ad909e21
SRRH
917 return;
918 }
919
920 local_irq_save(flags);
921 update_max_tr(tr, current, smp_processor_id());
922 local_irq_restore(flags);
923}
cab50379
SRV
924
925/**
5a93bae2 926 * tracing_snapshot - take a snapshot of the current buffer.
cab50379
SRV
927 *
928 * This causes a swap between the snapshot buffer and the current live
929 * tracing buffer. You can use this to take snapshots of the live
930 * trace when some condition is triggered, but continue to trace.
931 *
932 * Note, make sure to allocate the snapshot with either
933 * a tracing_snapshot_alloc(), or by doing it manually
934 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
935 *
936 * If the snapshot buffer is not allocated, it will stop tracing.
937 * Basically making a permanent snapshot.
938 */
939void tracing_snapshot(void)
940{
941 struct trace_array *tr = &global_trace;
942
943 tracing_snapshot_instance(tr);
944}
1b22e382 945EXPORT_SYMBOL_GPL(tracing_snapshot);
ad909e21
SRRH
946
947static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
948 struct trace_buffer *size_buf, int cpu_id);
3209cff4
SRRH
949static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
950
951static int alloc_snapshot(struct trace_array *tr)
952{
953 int ret;
954
955 if (!tr->allocated_snapshot) {
956
957 /* allocate spare buffer */
958 ret = resize_buffer_duplicate_size(&tr->max_buffer,
959 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
960 if (ret < 0)
961 return ret;
962
963 tr->allocated_snapshot = true;
964 }
965
966 return 0;
967}
968
ad1438a0 969static void free_snapshot(struct trace_array *tr)
3209cff4
SRRH
970{
971 /*
 972 * We don't free the ring buffer; instead, we resize it because
 973 * the max_tr ring buffer has some state (e.g. ring->clock) and
 974 * we want to preserve it.
975 */
976 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
977 set_buffer_entries(&tr->max_buffer, 1);
978 tracing_reset_online_cpus(&tr->max_buffer);
979 tr->allocated_snapshot = false;
980}
ad909e21 981
93e31ffb
TZ
982/**
983 * tracing_alloc_snapshot - allocate snapshot buffer.
984 *
985 * This only allocates the snapshot buffer if it isn't already
986 * allocated - it doesn't also take a snapshot.
987 *
988 * This is meant to be used in cases where the snapshot buffer needs
989 * to be set up for events that can't sleep but need to be able to
990 * trigger a snapshot.
991 */
992int tracing_alloc_snapshot(void)
993{
994 struct trace_array *tr = &global_trace;
995 int ret;
996
997 ret = alloc_snapshot(tr);
998 WARN_ON(ret < 0);
999
1000 return ret;
1001}
1002EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1003
ad909e21 1004/**
5a93bae2 1005 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
ad909e21 1006 *
5a93bae2 1007 * This is similar to tracing_snapshot(), but it will allocate the
ad909e21
SRRH
1008 * snapshot buffer if it isn't already allocated. Use this only
1009 * where it is safe to sleep, as the allocation may sleep.
1010 *
1011 * This causes a swap between the snapshot buffer and the current live
1012 * tracing buffer. You can use this to take snapshots of the live
1013 * trace when some condition is triggered, but continue to trace.
1014 */
1015void tracing_snapshot_alloc(void)
1016{
ad909e21
SRRH
1017 int ret;
1018
93e31ffb
TZ
1019 ret = tracing_alloc_snapshot();
1020 if (ret < 0)
3209cff4 1021 return;
ad909e21
SRRH
1022
1023 tracing_snapshot();
1024}
1b22e382 1025EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
1026#else
1027void tracing_snapshot(void)
1028{
1029 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1030}
1b22e382 1031EXPORT_SYMBOL_GPL(tracing_snapshot);
93e31ffb
TZ
1032int tracing_alloc_snapshot(void)
1033{
1034 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1035 return -ENODEV;
1036}
1037EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
ad909e21
SRRH
1038void tracing_snapshot_alloc(void)
1039{
1040 /* Give warning */
1041 tracing_snapshot();
1042}
1b22e382 1043EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
1044#endif /* CONFIG_TRACER_SNAPSHOT */
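/*
 * Example use from kernel code (a sketch; my_condition() is hypothetical):
 * allocate the spare buffer once where sleeping is allowed, then take a
 * snapshot whenever the interesting condition fires. tracing_snapshot()
 * itself does not sleep, but it is ignored from NMI context as noted above.
 *
 *   tracing_alloc_snapshot();
 *   ...
 *   if (my_condition())
 *           tracing_snapshot();
 */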
1045
2290f2c5 1046void tracer_tracing_off(struct trace_array *tr)
10246fa3
SRRH
1047{
1048 if (tr->trace_buffer.buffer)
1049 ring_buffer_record_off(tr->trace_buffer.buffer);
1050 /*
1051 * This flag is looked at when buffers haven't been allocated
1052 * yet, or by some tracers (like irqsoff), that just want to
1053 * know if the ring buffer has been disabled, but it can handle
 1054 * races where it gets disabled but we still do a record.
1055 * As the check is in the fast path of the tracers, it is more
1056 * important to be fast than accurate.
1057 */
1058 tr->buffer_disabled = 1;
1059 /* Make the flag seen by readers */
1060 smp_wmb();
1061}
1062
499e5470
SR
1063/**
1064 * tracing_off - turn off tracing buffers
1065 *
1066 * This function stops the tracing buffers from recording data.
1067 * It does not disable any overhead the tracers themselves may
1068 * be causing. This function simply causes all recording to
1069 * the ring buffers to fail.
1070 */
1071void tracing_off(void)
1072{
10246fa3 1073 tracer_tracing_off(&global_trace);
499e5470
SR
1074}
1075EXPORT_SYMBOL_GPL(tracing_off);
1076
de7edd31
SRRH
1077void disable_trace_on_warning(void)
1078{
1079 if (__disable_trace_on_warning)
1080 tracing_off();
1081}
1082
10246fa3
SRRH
1083/**
1084 * tracer_tracing_is_on - show real state of ring buffer enabled
1085 * @tr : the trace array to know if ring buffer is enabled
1086 *
1087 * Shows real state of the ring buffer if it is enabled or not.
1088 */
e7c15cd8 1089int tracer_tracing_is_on(struct trace_array *tr)
10246fa3
SRRH
1090{
1091 if (tr->trace_buffer.buffer)
1092 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1093 return !tr->buffer_disabled;
1094}
1095
499e5470
SR
1096/**
1097 * tracing_is_on - show state of ring buffers enabled
1098 */
1099int tracing_is_on(void)
1100{
10246fa3 1101 return tracer_tracing_is_on(&global_trace);
499e5470
SR
1102}
1103EXPORT_SYMBOL_GPL(tracing_is_on);
1104
3928a8a2 1105static int __init set_buf_size(char *str)
bc0c38d1 1106{
3928a8a2 1107 unsigned long buf_size;
c6caeeb1 1108
bc0c38d1
SR
1109 if (!str)
1110 return 0;
9d612bef 1111 buf_size = memparse(str, &str);
c6caeeb1 1112 /* nr_entries can not be zero */
9d612bef 1113 if (buf_size == 0)
c6caeeb1 1114 return 0;
3928a8a2 1115 trace_buf_size = buf_size;
bc0c38d1
SR
1116 return 1;
1117}
3928a8a2 1118__setup("trace_buf_size=", set_buf_size);
bc0c38d1 1119
0e950173
TB
1120static int __init set_tracing_thresh(char *str)
1121{
87abb3b1 1122 unsigned long threshold;
0e950173
TB
1123 int ret;
1124
1125 if (!str)
1126 return 0;
bcd83ea6 1127 ret = kstrtoul(str, 0, &threshold);
0e950173
TB
1128 if (ret < 0)
1129 return 0;
87abb3b1 1130 tracing_thresh = threshold * 1000;
0e950173
TB
1131 return 1;
1132}
1133__setup("tracing_thresh=", set_tracing_thresh);
1134
57f50be1
SR
1135unsigned long nsecs_to_usecs(unsigned long nsecs)
1136{
1137 return nsecs / 1000;
1138}
1139
a3418a36
SRRH
1140/*
1141 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
f57a4143 1142 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
a3418a36 1143 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
f57a4143 1144 * of strings in the order that the evals (enum) were defined.
a3418a36
SRRH
1145 */
1146#undef C
1147#define C(a, b) b
1148
4fcdae83 1149/* These must match the bit positions in trace_iterator_flags */
bc0c38d1 1150static const char *trace_options[] = {
a3418a36 1151 TRACE_FLAGS
bc0c38d1
SR
1152 NULL
1153};
1154
5079f326
Z
1155static struct {
1156 u64 (*func)(void);
1157 const char *name;
8be0709f 1158 int in_ns; /* is this clock in nanoseconds? */
5079f326 1159} trace_clocks[] = {
1b3e5c09
TG
1160 { trace_clock_local, "local", 1 },
1161 { trace_clock_global, "global", 1 },
1162 { trace_clock_counter, "counter", 0 },
e7fda6c4 1163 { trace_clock_jiffies, "uptime", 0 },
1b3e5c09
TG
1164 { trace_clock, "perf", 1 },
1165 { ktime_get_mono_fast_ns, "mono", 1 },
aabfa5f2 1166 { ktime_get_raw_fast_ns, "mono_raw", 1 },
80ec3552 1167 { ktime_get_boot_fast_ns, "boot", 1 },
8cbd9cc6 1168 ARCH_TRACE_CLOCKS
5079f326
Z
1169};
1170
860f9f6b
TZ
1171bool trace_clock_in_ns(struct trace_array *tr)
1172{
1173 if (trace_clocks[tr->clock_id].in_ns)
1174 return true;
1175
1176 return false;
1177}
1178
b63f39ea 1179/*
1180 * trace_parser_get_init - gets the buffer for trace parser
1181 */
1182int trace_parser_get_init(struct trace_parser *parser, int size)
1183{
1184 memset(parser, 0, sizeof(*parser));
1185
1186 parser->buffer = kmalloc(size, GFP_KERNEL);
1187 if (!parser->buffer)
1188 return 1;
1189
1190 parser->size = size;
1191 return 0;
1192}
1193
1194/*
1195 * trace_parser_put - frees the buffer for trace parser
1196 */
1197void trace_parser_put(struct trace_parser *parser)
1198{
1199 kfree(parser->buffer);
0e684b65 1200 parser->buffer = NULL;
b63f39ea 1201}
1202
1203/*
1204 * trace_get_user - reads the user input string separated by space
1205 * (matched by isspace(ch))
1206 *
1207 * For each string found the 'struct trace_parser' is updated,
1208 * and the function returns.
1209 *
1210 * Returns number of bytes read.
1211 *
1212 * See kernel/trace/trace.h for 'struct trace_parser' details.
1213 */
1214int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1215 size_t cnt, loff_t *ppos)
1216{
1217 char ch;
1218 size_t read = 0;
1219 ssize_t ret;
1220
1221 if (!*ppos)
1222 trace_parser_clear(parser);
1223
1224 ret = get_user(ch, ubuf++);
1225 if (ret)
1226 goto out;
1227
1228 read++;
1229 cnt--;
1230
1231 /*
1232 * The parser is not finished with the last write,
1233 * continue reading the user input without skipping spaces.
1234 */
1235 if (!parser->cont) {
1236 /* skip white space */
1237 while (cnt && isspace(ch)) {
1238 ret = get_user(ch, ubuf++);
1239 if (ret)
1240 goto out;
1241 read++;
1242 cnt--;
1243 }
1244
76638d96
CD
1245 parser->idx = 0;
1246
b63f39ea 1247 /* only spaces were written */
921a7acd 1248 if (isspace(ch) || !ch) {
b63f39ea 1249 *ppos += read;
1250 ret = read;
1251 goto out;
1252 }
b63f39ea 1253 }
1254
1255 /* read the non-space input */
921a7acd 1256 while (cnt && !isspace(ch) && ch) {
3c235a33 1257 if (parser->idx < parser->size - 1)
b63f39ea 1258 parser->buffer[parser->idx++] = ch;
1259 else {
1260 ret = -EINVAL;
1261 goto out;
1262 }
1263 ret = get_user(ch, ubuf++);
1264 if (ret)
1265 goto out;
1266 read++;
1267 cnt--;
1268 }
1269
1270 /* We either got finished input or we have to wait for another call. */
921a7acd 1271 if (isspace(ch) || !ch) {
b63f39ea 1272 parser->buffer[parser->idx] = 0;
1273 parser->cont = false;
057db848 1274 } else if (parser->idx < parser->size - 1) {
b63f39ea 1275 parser->cont = true;
1276 parser->buffer[parser->idx++] = ch;
f4d0706c
CD
1277 /* Make sure the parsed string always terminates with '\0'. */
1278 parser->buffer[parser->idx] = 0;
057db848
SR
1279 } else {
1280 ret = -EINVAL;
1281 goto out;
b63f39ea 1282 }
1283
1284 *ppos += read;
1285 ret = read;
1286
1287out:
1288 return ret;
1289}
1290
3a161d99 1291/* TODO add a seq_buf_to_buffer() */
b8b94265 1292static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
3c56819b
EGM
1293{
1294 int len;
3c56819b 1295
5ac48378 1296 if (trace_seq_used(s) <= s->seq.readpos)
3c56819b
EGM
1297 return -EBUSY;
1298
5ac48378 1299 len = trace_seq_used(s) - s->seq.readpos;
3c56819b
EGM
1300 if (cnt > len)
1301 cnt = len;
3a161d99 1302 memcpy(buf, s->buffer + s->seq.readpos, cnt);
3c56819b 1303
3a161d99 1304 s->seq.readpos += cnt;
3c56819b
EGM
1305 return cnt;
1306}
1307
0e950173
TB
1308unsigned long __read_mostly tracing_thresh;
1309
5d4a9dba 1310#ifdef CONFIG_TRACER_MAX_TRACE
5d4a9dba
SR
1311/*
1312 * Copy the new maximum trace into the separate maximum-trace
1313 * structure. (this way the maximum trace is permanently saved,
5a93bae2 1314 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
5d4a9dba
SR
1315 */
1316static void
1317__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1318{
12883efb
SRRH
1319 struct trace_buffer *trace_buf = &tr->trace_buffer;
1320 struct trace_buffer *max_buf = &tr->max_buffer;
1321 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1322 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
5d4a9dba 1323
12883efb
SRRH
1324 max_buf->cpu = cpu;
1325 max_buf->time_start = data->preempt_timestamp;
5d4a9dba 1326
6d9b3fa5 1327 max_data->saved_latency = tr->max_latency;
8248ac05
SR
1328 max_data->critical_start = data->critical_start;
1329 max_data->critical_end = data->critical_end;
5d4a9dba 1330
1acaa1b2 1331 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
8248ac05 1332 max_data->pid = tsk->pid;
f17a5194
SRRH
1333 /*
1334 * If tsk == current, then use current_uid(), as that does not use
1335 * RCU. The irq tracer can be called out of RCU scope.
1336 */
1337 if (tsk == current)
1338 max_data->uid = current_uid();
1339 else
1340 max_data->uid = task_uid(tsk);
1341
8248ac05
SR
1342 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1343 max_data->policy = tsk->policy;
1344 max_data->rt_priority = tsk->rt_priority;
5d4a9dba
SR
1345
1346 /* record this tasks comm */
1347 tracing_record_cmdline(tsk);
1348}
1349
4fcdae83
SR
1350/**
1351 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1352 * @tr: tracer
1353 * @tsk: the task with the latency
1354 * @cpu: The cpu that initiated the trace.
1355 *
1356 * Flip the buffers between the @tr and the max_tr and record information
1357 * about which task was the cause of this latency.
1358 */
e309b41d 1359void
bc0c38d1
SR
1360update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1361{
2721e72d 1362 struct ring_buffer *buf;
bc0c38d1 1363
2b6080f2 1364 if (tr->stop_count)
b8de7bd1
SR
1365 return;
1366
4c11d7ae 1367 WARN_ON_ONCE(!irqs_disabled());
34600f0e 1368
45ad21ca 1369 if (!tr->allocated_snapshot) {
debdd57f 1370 /* Only the nop tracer should hit this when disabling */
2b6080f2 1371 WARN_ON_ONCE(tr->current_trace != &nop_trace);
34600f0e 1372 return;
debdd57f 1373 }
34600f0e 1374
0b9b12c1 1375 arch_spin_lock(&tr->max_lock);
3928a8a2 1376
12883efb
SRRH
1377 buf = tr->trace_buffer.buffer;
1378 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1379 tr->max_buffer.buffer = buf;
3928a8a2 1380
bc0c38d1 1381 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1382 arch_spin_unlock(&tr->max_lock);
bc0c38d1
SR
1383}
1384
1385/**
1386 * update_max_tr_single - only copy one trace over, and reset the rest
 1387 * @tr: tracer
 1388 * @tsk: the task with the latency
 1389 * @cpu: the cpu of the buffer to copy.
4fcdae83
SR
1390 *
1391 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
bc0c38d1 1392 */
e309b41d 1393void
bc0c38d1
SR
1394update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1395{
3928a8a2 1396 int ret;
bc0c38d1 1397
2b6080f2 1398 if (tr->stop_count)
b8de7bd1
SR
1399 return;
1400
4c11d7ae 1401 WARN_ON_ONCE(!irqs_disabled());
6c24499f 1402 if (!tr->allocated_snapshot) {
2930e04d 1403 /* Only the nop tracer should hit this when disabling */
9e8529af 1404 WARN_ON_ONCE(tr->current_trace != &nop_trace);
ef710e10 1405 return;
2930e04d 1406 }
ef710e10 1407
0b9b12c1 1408 arch_spin_lock(&tr->max_lock);
bc0c38d1 1409
12883efb 1410 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
3928a8a2 1411
e8165dbb
SR
1412 if (ret == -EBUSY) {
1413 /*
1414 * We failed to swap the buffer due to a commit taking
1415 * place on this CPU. We fail to record, but we reset
1416 * the max trace buffer (no one writes directly to it)
1417 * and flag that it failed.
1418 */
12883efb 1419 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
e8165dbb
SR
1420 "Failed to swap buffers due to commit in progress\n");
1421 }
1422
e8165dbb 1423 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
bc0c38d1
SR
1424
1425 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1426 arch_spin_unlock(&tr->max_lock);
bc0c38d1 1427}
5d4a9dba 1428#endif /* CONFIG_TRACER_MAX_TRACE */
bc0c38d1 1429
e30f53aa 1430static int wait_on_pipe(struct trace_iterator *iter, bool full)
0d5c6e1c 1431{
15693458
SRRH
1432 /* Iterators are static, they should be filled or empty */
1433 if (trace_buffer_iter(iter, iter->cpu_file))
8b8b3683 1434 return 0;
0d5c6e1c 1435
e30f53aa
RV
1436 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1437 full);
0d5c6e1c
SR
1438}
1439
f4e781c0 1440#ifdef CONFIG_FTRACE_STARTUP_TEST
9afecfbb
SRV
1441static bool selftests_can_run;
1442
1443struct trace_selftests {
1444 struct list_head list;
1445 struct tracer *type;
1446};
1447
1448static LIST_HEAD(postponed_selftests);
1449
1450static int save_selftest(struct tracer *type)
1451{
1452 struct trace_selftests *selftest;
1453
1454 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1455 if (!selftest)
1456 return -ENOMEM;
1457
1458 selftest->type = type;
1459 list_add(&selftest->list, &postponed_selftests);
1460 return 0;
1461}
1462
f4e781c0
SRRH
1463static int run_tracer_selftest(struct tracer *type)
1464{
1465 struct trace_array *tr = &global_trace;
1466 struct tracer *saved_tracer = tr->current_trace;
1467 int ret;
0d5c6e1c 1468
f4e781c0
SRRH
1469 if (!type->selftest || tracing_selftest_disabled)
1470 return 0;
0d5c6e1c 1471
9afecfbb
SRV
1472 /*
1473 * If a tracer registers early in boot up (before scheduling is
1474 * initialized and such), then do not run its selftests yet.
1475 * Instead, run it a little later in the boot process.
1476 */
1477 if (!selftests_can_run)
1478 return save_selftest(type);
1479
0d5c6e1c 1480 /*
f4e781c0
SRRH
1481 * Run a selftest on this tracer.
1482 * Here we reset the trace buffer, and set the current
1483 * tracer to be this tracer. The tracer can then run some
1484 * internal tracing to verify that everything is in order.
1485 * If we fail, we do not register this tracer.
0d5c6e1c 1486 */
f4e781c0 1487 tracing_reset_online_cpus(&tr->trace_buffer);
0d5c6e1c 1488
f4e781c0
SRRH
1489 tr->current_trace = type;
1490
1491#ifdef CONFIG_TRACER_MAX_TRACE
1492 if (type->use_max_tr) {
1493 /* If we expanded the buffers, make sure the max is expanded too */
1494 if (ring_buffer_expanded)
1495 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1496 RING_BUFFER_ALL_CPUS);
1497 tr->allocated_snapshot = true;
1498 }
1499#endif
1500
1501 /* the test is responsible for initializing and enabling */
1502 pr_info("Testing tracer %s: ", type->name);
1503 ret = type->selftest(type, tr);
1504 /* the test is responsible for resetting too */
1505 tr->current_trace = saved_tracer;
1506 if (ret) {
1507 printk(KERN_CONT "FAILED!\n");
1508 /* Add the warning after printing 'FAILED' */
1509 WARN_ON(1);
1510 return -1;
1511 }
1512 /* Only reset on passing, to avoid touching corrupted buffers */
1513 tracing_reset_online_cpus(&tr->trace_buffer);
1514
1515#ifdef CONFIG_TRACER_MAX_TRACE
1516 if (type->use_max_tr) {
1517 tr->allocated_snapshot = false;
0d5c6e1c 1518
f4e781c0
SRRH
1519 /* Shrink the max buffer again */
1520 if (ring_buffer_expanded)
1521 ring_buffer_resize(tr->max_buffer.buffer, 1,
1522 RING_BUFFER_ALL_CPUS);
1523 }
1524#endif
1525
1526 printk(KERN_CONT "PASSED\n");
1527 return 0;
1528}
9afecfbb
SRV
1529
1530static __init int init_trace_selftests(void)
1531{
1532 struct trace_selftests *p, *n;
1533 struct tracer *t, **last;
1534 int ret;
1535
1536 selftests_can_run = true;
1537
1538 mutex_lock(&trace_types_lock);
1539
1540 if (list_empty(&postponed_selftests))
1541 goto out;
1542
1543 pr_info("Running postponed tracer tests:\n");
1544
1545 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1546 ret = run_tracer_selftest(p->type);
1547 /* If the test fails, then warn and remove from available_tracers */
1548 if (ret < 0) {
1549 WARN(1, "tracer: %s failed selftest, disabling\n",
1550 p->type->name);
1551 last = &trace_types;
1552 for (t = trace_types; t; t = t->next) {
1553 if (t == p->type) {
1554 *last = t->next;
1555 break;
1556 }
1557 last = &t->next;
1558 }
1559 }
1560 list_del(&p->list);
1561 kfree(p);
1562 }
1563
1564 out:
1565 mutex_unlock(&trace_types_lock);
1566
1567 return 0;
1568}
b9ef0326 1569core_initcall(init_trace_selftests);
f4e781c0
SRRH
1570#else
1571static inline int run_tracer_selftest(struct tracer *type)
1572{
1573 return 0;
0d5c6e1c 1574}
f4e781c0 1575#endif /* CONFIG_FTRACE_STARTUP_TEST */
0d5c6e1c 1576
41d9c0be
SRRH
1577static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1578
a4d1e688
JW
1579static void __init apply_trace_boot_options(void);
1580
4fcdae83
SR
1581/**
1582 * register_tracer - register a tracer with the ftrace system.
1583 * @type - the plugin for the tracer
1584 *
1585 * Register a new plugin tracer.
1586 */
a4d1e688 1587int __init register_tracer(struct tracer *type)
bc0c38d1
SR
1588{
1589 struct tracer *t;
bc0c38d1
SR
1590 int ret = 0;
1591
1592 if (!type->name) {
1593 pr_info("Tracer must have a name\n");
1594 return -1;
1595 }
1596
24a461d5 1597 if (strlen(type->name) >= MAX_TRACER_SIZE) {
ee6c2c1b
LZ
1598 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1599 return -1;
1600 }
1601
bc0c38d1 1602 mutex_lock(&trace_types_lock);
86fa2f60 1603
8e1b82e0
FW
1604 tracing_selftest_running = true;
1605
bc0c38d1
SR
1606 for (t = trace_types; t; t = t->next) {
1607 if (strcmp(type->name, t->name) == 0) {
1608 /* already found */
ee6c2c1b 1609 pr_info("Tracer %s already registered\n",
bc0c38d1
SR
1610 type->name);
1611 ret = -1;
1612 goto out;
1613 }
1614 }
1615
adf9f195
FW
1616 if (!type->set_flag)
1617 type->set_flag = &dummy_set_flag;
d39cdd20
CH
1618 if (!type->flags) {
1619 /*allocate a dummy tracer_flags*/
1620 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
c8ca003b
CH
1621 if (!type->flags) {
1622 ret = -ENOMEM;
1623 goto out;
1624 }
d39cdd20
CH
1625 type->flags->val = 0;
1626 type->flags->opts = dummy_tracer_opt;
1627 } else
adf9f195
FW
1628 if (!type->flags->opts)
1629 type->flags->opts = dummy_tracer_opt;
6eaaa5d5 1630
d39cdd20
CH
1631 /* store the tracer for __set_tracer_option */
1632 type->flags->trace = type;
1633
f4e781c0
SRRH
1634 ret = run_tracer_selftest(type);
1635 if (ret < 0)
1636 goto out;
60a11774 1637
bc0c38d1
SR
1638 type->next = trace_types;
1639 trace_types = type;
41d9c0be 1640 add_tracer_options(&global_trace, type);
60a11774 1641
bc0c38d1 1642 out:
8e1b82e0 1643 tracing_selftest_running = false;
bc0c38d1
SR
1644 mutex_unlock(&trace_types_lock);
1645
dac74940
SR
1646 if (ret || !default_bootup_tracer)
1647 goto out_unlock;
1648
ee6c2c1b 1649 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
dac74940
SR
1650 goto out_unlock;
1651
1652 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1653 /* Do we want this tracer to start on bootup? */
607e2ea1 1654 tracing_set_tracer(&global_trace, type->name);
dac74940 1655 default_bootup_tracer = NULL;
a4d1e688
JW
1656
1657 apply_trace_boot_options();
1658
dac74940 1659 /* disable other selftests, since this will break it. */
55034cd6 1660 tracing_selftest_disabled = true;
b2821ae6 1661#ifdef CONFIG_FTRACE_STARTUP_TEST
dac74940
SR
1662 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1663 type->name);
b2821ae6 1664#endif
b2821ae6 1665
dac74940 1666 out_unlock:
bc0c38d1
SR
1667 return ret;
1668}
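/*
 * Minimal registration sketch (hypothetical tracer; only a trimmed set
 * of struct tracer fields is shown):
 *
 *   static struct tracer example_tracer __read_mostly = {
 *           .name  = "example",
 *           .init  = example_tracer_init,
 *           .reset = example_tracer_reset,
 *   };
 *
 *   static int __init example_tracer_register(void)
 *   {
 *           return register_tracer(&example_tracer);
 *   }
 *   core_initcall(example_tracer_register);
 */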
1669
12883efb 1670void tracing_reset(struct trace_buffer *buf, int cpu)
f633903a 1671{
12883efb 1672 struct ring_buffer *buffer = buf->buffer;
f633903a 1673
a5416411
HT
1674 if (!buffer)
1675 return;
1676
f633903a
SR
1677 ring_buffer_record_disable(buffer);
1678
1679 /* Make sure all commits have finished */
1680 synchronize_sched();
68179686 1681 ring_buffer_reset_cpu(buffer, cpu);
f633903a
SR
1682
1683 ring_buffer_record_enable(buffer);
1684}
1685
12883efb 1686void tracing_reset_online_cpus(struct trace_buffer *buf)
213cc060 1687{
12883efb 1688 struct ring_buffer *buffer = buf->buffer;
213cc060
PE
1689 int cpu;
1690
a5416411
HT
1691 if (!buffer)
1692 return;
1693
621968cd
SR
1694 ring_buffer_record_disable(buffer);
1695
1696 /* Make sure all commits have finished */
1697 synchronize_sched();
1698
9457158b 1699 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
213cc060
PE
1700
1701 for_each_online_cpu(cpu)
68179686 1702 ring_buffer_reset_cpu(buffer, cpu);
621968cd
SR
1703
1704 ring_buffer_record_enable(buffer);
213cc060
PE
1705}
1706
09d8091c 1707/* Must have trace_types_lock held */
873c642f 1708void tracing_reset_all_online_cpus(void)
9456f0fa 1709{
873c642f
SRRH
1710 struct trace_array *tr;
1711
873c642f 1712 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
065e63f9
SRV
1713 if (!tr->clear_trace)
1714 continue;
1715 tr->clear_trace = false;
12883efb
SRRH
1716 tracing_reset_online_cpus(&tr->trace_buffer);
1717#ifdef CONFIG_TRACER_MAX_TRACE
1718 tracing_reset_online_cpus(&tr->max_buffer);
1719#endif
873c642f 1720 }
9456f0fa
SR
1721}
1722
d914ba37
JF
1723static int *tgid_map;
1724
939c7a4f 1725#define SAVED_CMDLINES_DEFAULT 128
2c7eea4c 1726#define NO_CMDLINE_MAP UINT_MAX
edc35bd7 1727static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
939c7a4f
YY
1728struct saved_cmdlines_buffer {
1729 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1730 unsigned *map_cmdline_to_pid;
1731 unsigned cmdline_num;
1732 int cmdline_idx;
1733 char *saved_cmdlines;
1734};
1735static struct saved_cmdlines_buffer *savedcmd;
25b0b44a 1736
25b0b44a 1737/* temporary disable recording */
d914ba37 1738static atomic_t trace_record_taskinfo_disabled __read_mostly;
bc0c38d1 1739
939c7a4f
YY
1740static inline char *get_saved_cmdlines(int idx)
1741{
1742 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1743}
1744
1745static inline void set_cmdline(int idx, const char *cmdline)
bc0c38d1 1746{
939c7a4f
YY
1747 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1748}
1749
1750static int allocate_cmdlines_buffer(unsigned int val,
1751 struct saved_cmdlines_buffer *s)
1752{
1753 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1754 GFP_KERNEL);
1755 if (!s->map_cmdline_to_pid)
1756 return -ENOMEM;
1757
1758 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1759 if (!s->saved_cmdlines) {
1760 kfree(s->map_cmdline_to_pid);
1761 return -ENOMEM;
1762 }
1763
1764 s->cmdline_idx = 0;
1765 s->cmdline_num = val;
1766 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1767 sizeof(s->map_pid_to_cmdline));
1768 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1769 val * sizeof(*s->map_cmdline_to_pid));
1770
1771 return 0;
1772}
1773
1774static int trace_create_savedcmd(void)
1775{
1776 int ret;
1777
a6af8fbf 1778 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
939c7a4f
YY
1779 if (!savedcmd)
1780 return -ENOMEM;
1781
1782 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1783 if (ret < 0) {
1784 kfree(savedcmd);
1785 savedcmd = NULL;
1786 return -ENOMEM;
1787 }
1788
1789 return 0;
bc0c38d1
SR
1790}
1791
b5130b1e
CE
1792int is_tracing_stopped(void)
1793{
2b6080f2 1794 return global_trace.stop_count;
b5130b1e
CE
1795}
1796
0f048701
SR
1797/**
1798 * tracing_start - quick start of the tracer
1799 *
1800 * If tracing is enabled but was stopped by tracing_stop,
1801 * this will start the tracer back up.
1802 */
1803void tracing_start(void)
1804{
1805 struct ring_buffer *buffer;
1806 unsigned long flags;
1807
1808 if (tracing_disabled)
1809 return;
1810
2b6080f2
SR
1811 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1812 if (--global_trace.stop_count) {
1813 if (global_trace.stop_count < 0) {
b06a8301
SR
1814 /* Someone screwed up their debugging */
1815 WARN_ON_ONCE(1);
2b6080f2 1816 global_trace.stop_count = 0;
b06a8301 1817 }
0f048701
SR
1818 goto out;
1819 }
1820
a2f80714 1821 /* Prevent the buffers from switching */
0b9b12c1 1822 arch_spin_lock(&global_trace.max_lock);
0f048701 1823
12883efb 1824 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1825 if (buffer)
1826 ring_buffer_record_enable(buffer);
1827
12883efb
SRRH
1828#ifdef CONFIG_TRACER_MAX_TRACE
1829 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1830 if (buffer)
1831 ring_buffer_record_enable(buffer);
12883efb 1832#endif
0f048701 1833
0b9b12c1 1834 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1835
0f048701 1836 out:
2b6080f2
SR
1837 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1838}
1839
1840static void tracing_start_tr(struct trace_array *tr)
1841{
1842 struct ring_buffer *buffer;
1843 unsigned long flags;
1844
1845 if (tracing_disabled)
1846 return;
1847
1848 /* If global, we need to also start the max tracer */
1849 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1850 return tracing_start();
1851
1852 raw_spin_lock_irqsave(&tr->start_lock, flags);
1853
1854 if (--tr->stop_count) {
1855 if (tr->stop_count < 0) {
1856 /* Someone screwed up their debugging */
1857 WARN_ON_ONCE(1);
1858 tr->stop_count = 0;
1859 }
1860 goto out;
1861 }
1862
12883efb 1863 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1864 if (buffer)
1865 ring_buffer_record_enable(buffer);
1866
1867 out:
1868 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1869}
1870
1871/**
1872 * tracing_stop - quick stop of the tracer
1873 *
1874 * Light weight way to stop tracing. Use in conjunction with
1875 * tracing_start.
1876 */
1877void tracing_stop(void)
1878{
1879 struct ring_buffer *buffer;
1880 unsigned long flags;
1881
2b6080f2
SR
1882 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1883 if (global_trace.stop_count++)
0f048701
SR
1884 goto out;
1885
a2f80714 1886 /* Prevent the buffers from switching */
0b9b12c1 1887 arch_spin_lock(&global_trace.max_lock);
a2f80714 1888
12883efb 1889 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1890 if (buffer)
1891 ring_buffer_record_disable(buffer);
1892
12883efb
SRRH
1893#ifdef CONFIG_TRACER_MAX_TRACE
1894 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1895 if (buffer)
1896 ring_buffer_record_disable(buffer);
12883efb 1897#endif
0f048701 1898
0b9b12c1 1899 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1900
0f048701 1901 out:
2b6080f2
SR
1902 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1903}
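/*
 * Illustrative sketch, not part of this file: bracketing a suspect
 * operation with tracing_stop()/tracing_start() so the events leading
 * up to it are preserved in the ring buffer instead of being
 * overwritten.  do_suspect_work() is a hypothetical placeholder.
 */
static void example_freeze_trace_after_work(void)
{
	do_suspect_work();

	/* Stop recording so the interesting events are not overwritten. */
	tracing_stop();
	pr_info("tracing stopped; inspect the trace file, then restart\n");

	/* Resume recording once the buffer has been examined. */
	tracing_start();
}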
1904
1905static void tracing_stop_tr(struct trace_array *tr)
1906{
1907 struct ring_buffer *buffer;
1908 unsigned long flags;
1909
1910 /* If global, we need to also stop the max tracer */
1911 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1912 return tracing_stop();
1913
1914 raw_spin_lock_irqsave(&tr->start_lock, flags);
1915 if (tr->stop_count++)
1916 goto out;
1917
12883efb 1918 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1919 if (buffer)
1920 ring_buffer_record_disable(buffer);
1921
1922 out:
1923 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1924}
1925
379cfdac 1926static int trace_save_cmdline(struct task_struct *tsk)
bc0c38d1 1927{
a635cf04 1928 unsigned pid, idx;
bc0c38d1 1929
eaf260ac
JF
1930 /* treat recording of idle task as a success */
1931 if (!tsk->pid)
1932 return 1;
1933
1934 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
379cfdac 1935 return 0;
bc0c38d1
SR
1936
1937 /*
1938 * It's not the end of the world if we don't get
1939 * the lock, but we also don't want to spin
1940 * nor do we want to disable interrupts,
1941 * so if we miss here, then better luck next time.
1942 */
0199c4e6 1943 if (!arch_spin_trylock(&trace_cmdline_lock))
379cfdac 1944 return 0;
bc0c38d1 1945
939c7a4f 1946 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2c7eea4c 1947 if (idx == NO_CMDLINE_MAP) {
939c7a4f 1948 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
bc0c38d1 1949
a635cf04
CE
1950 /*
1951 * Check whether the cmdline buffer at idx has a pid
1952 * mapped. We are going to overwrite that entry so we
1953 * need to clear the map_pid_to_cmdline. Otherwise we
1954 * would read the new comm for the old pid.
1955 */
939c7a4f 1956 pid = savedcmd->map_cmdline_to_pid[idx];
a635cf04 1957 if (pid != NO_CMDLINE_MAP)
939c7a4f 1958 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
bc0c38d1 1959
939c7a4f
YY
1960 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1961 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
bc0c38d1 1962
939c7a4f 1963 savedcmd->cmdline_idx = idx;
bc0c38d1
SR
1964 }
1965
939c7a4f 1966 set_cmdline(idx, tsk->comm);
bc0c38d1 1967
0199c4e6 1968 arch_spin_unlock(&trace_cmdline_lock);
379cfdac
SRRH
1969
1970 return 1;
bc0c38d1
SR
1971}
1972
4c27e756 1973static void __trace_find_cmdline(int pid, char comm[])
bc0c38d1 1974{
bc0c38d1
SR
1975 unsigned map;
1976
4ca53085
SR
1977 if (!pid) {
1978 strcpy(comm, "<idle>");
1979 return;
1980 }
bc0c38d1 1981
74bf4076
SR
1982 if (WARN_ON_ONCE(pid < 0)) {
1983 strcpy(comm, "<XXX>");
1984 return;
1985 }
1986
4ca53085
SR
1987 if (pid > PID_MAX_DEFAULT) {
1988 strcpy(comm, "<...>");
1989 return;
1990 }
bc0c38d1 1991
939c7a4f 1992 map = savedcmd->map_pid_to_cmdline[pid];
50d88758 1993 if (map != NO_CMDLINE_MAP)
e09e2867 1994 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
50d88758
TG
1995 else
1996 strcpy(comm, "<...>");
4c27e756
SRRH
1997}
1998
1999void trace_find_cmdline(int pid, char comm[])
2000{
2001 preempt_disable();
2002 arch_spin_lock(&trace_cmdline_lock);
2003
2004 __trace_find_cmdline(pid, comm);
bc0c38d1 2005
0199c4e6 2006 arch_spin_unlock(&trace_cmdline_lock);
5b6045a9 2007 preempt_enable();
bc0c38d1
SR
2008}
2009
d914ba37
JF
2010int trace_find_tgid(int pid)
2011{
2012 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2013 return 0;
2014
2015 return tgid_map[pid];
2016}
2017
2018static int trace_save_tgid(struct task_struct *tsk)
2019{
bd45d34d
JF
2020 /* treat recording of idle task as a success */
2021 if (!tsk->pid)
2022 return 1;
2023
2024 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
d914ba37
JF
2025 return 0;
2026
2027 tgid_map[tsk->pid] = tsk->tgid;
2028 return 1;
2029}
2030
2031static bool tracing_record_taskinfo_skip(int flags)
2032{
2033 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2034 return true;
2035 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2036 return true;
2037 if (!__this_cpu_read(trace_taskinfo_save))
2038 return true;
2039 return false;
2040}
2041
2042/**
2043 * tracing_record_taskinfo - record the task info of a task
2044 *
 2045 * @task: task to record
 2046 * @flags: TRACE_RECORD_CMDLINE for recording comm
 2047 *         TRACE_RECORD_TGID for recording tgid
2048 */
2049void tracing_record_taskinfo(struct task_struct *task, int flags)
2050{
29b1a8ad
JF
2051 bool done;
2052
d914ba37
JF
2053 if (tracing_record_taskinfo_skip(flags))
2054 return;
29b1a8ad
JF
2055
2056 /*
2057 * Record as much task information as possible. If some fail, continue
2058 * to try to record the others.
2059 */
2060 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2061 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2062
2063 /* If recording any information failed, retry again soon. */
2064 if (!done)
d914ba37
JF
2065 return;
2066
2067 __this_cpu_write(trace_taskinfo_save, false);
2068}
2069
2070/**
2071 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2072 *
 2073 * @prev: previous task during sched_switch
 2074 * @next: next task during sched_switch
 2075 * @flags: TRACE_RECORD_CMDLINE for recording comm
 2076 *         TRACE_RECORD_TGID for recording tgid
2077 */
2078void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2079 struct task_struct *next, int flags)
bc0c38d1 2080{
29b1a8ad
JF
2081 bool done;
2082
d914ba37
JF
2083 if (tracing_record_taskinfo_skip(flags))
2084 return;
2085
29b1a8ad
JF
2086 /*
2087 * Record as much task information as possible. If some fail, continue
2088 * to try to record the others.
2089 */
2090 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2091 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2092 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2093 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
bc0c38d1 2094
29b1a8ad
JF
2095 /* If recording any information failed, retry again soon. */
2096 if (!done)
7ffbd48d
SR
2097 return;
2098
d914ba37
JF
2099 __this_cpu_write(trace_taskinfo_save, false);
2100}
2101
2102/* Helpers to record a specific task information */
2103void tracing_record_cmdline(struct task_struct *task)
2104{
2105 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2106}
2107
2108void tracing_record_tgid(struct task_struct *task)
2109{
2110 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
bc0c38d1
SR
2111}
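/*
 * Illustrative sketch, not part of this file: the typical pairing of the
 * helpers above.  Events store only a pid; the comm and tgid are
 * resolved at read time from the saved maps (and recording is gated by
 * the per-cpu trace_taskinfo_save flag).  example_resolve_task() is
 * hypothetical.
 */
static void example_resolve_task(int pid)
{
	char comm[TASK_COMM_LEN];

	/* At event time: remember the current task's comm and tgid. */
	tracing_record_taskinfo(current, TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID);

	/* At read time: map a recorded pid back to a name and tgid. */
	trace_find_cmdline(pid, comm);
	pr_info("pid %d -> comm %s, tgid %d\n", pid, comm, trace_find_tgid(pid));
}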
2112
af0009fc
SRV
2113/*
2114 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2115 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2116 * simplifies those functions and keeps them in sync.
2117 */
2118enum print_line_t trace_handle_return(struct trace_seq *s)
2119{
2120 return trace_seq_has_overflowed(s) ?
2121 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2122}
2123EXPORT_SYMBOL_GPL(trace_handle_return);
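/*
 * Illustrative sketch, not part of this file: how an event's output
 * callback typically uses trace_handle_return().  All formatting goes
 * through the iterator's trace_seq; the helper converts "did the seq
 * overflow?" into the enum the print loop expects.
 * example_event_output() is hypothetical.
 */
static enum print_line_t example_event_output(struct trace_iterator *iter,
					      int flags, struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "example event from pid %d\n", iter->ent->pid);

	return trace_handle_return(s);
}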
2124
45dcd8b8 2125void
38697053
SR
2126tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2127 int pc)
bc0c38d1
SR
2128{
2129 struct task_struct *tsk = current;
bc0c38d1 2130
777e208d
SR
2131 entry->preempt_count = pc & 0xff;
2132 entry->pid = (tsk) ? tsk->pid : 0;
2133 entry->flags =
9244489a 2134#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2e2ca155 2135 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
9244489a
SR
2136#else
2137 TRACE_FLAG_IRQS_NOSUPPORT |
2138#endif
7e6867bf 2139 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
bc0c38d1 2140 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
c59f29cb 2141 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
e5137b50
PZ
2142 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2143 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
bc0c38d1 2144}
f413cdb8 2145EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
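/*
 * Illustrative sketch, not part of this file: filling in the common
 * trace_entry header for a hand-rolled event.  The entry is assumed to
 * come from a successful ring buffer reservation.
 */
static void example_fill_entry_header(struct trace_entry *ent)
{
	unsigned long flags;

	local_save_flags(flags);
	/* Records pid, preempt depth and irq/softirq/NMI state. */
	tracing_generic_entry_update(ent, flags, preempt_count());
}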
bc0c38d1 2146
e77405ad
SR
2147struct ring_buffer_event *
2148trace_buffer_lock_reserve(struct ring_buffer *buffer,
2149 int type,
2150 unsigned long len,
2151 unsigned long flags, int pc)
51a763dd 2152{
3e9a8aad 2153 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
0fc1b09f
SRRH
2154}
2155
2156DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2157DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2158static int trace_buffered_event_ref;
2159
2160/**
2161 * trace_buffered_event_enable - enable buffering events
2162 *
2163 * When events are being filtered, it is quicker to use a temporary
2164 * buffer to write the event data into if there's a likely chance
 2165 * that it will not be committed. Discarding an event from the ring
 2166 * buffer is not as fast as committing one, and is much slower than
 2167 * copying the data and then committing it.
2168 *
2169 * When an event is to be filtered, allocate per cpu buffers to
2170 * write the event data into, and if the event is filtered and discarded
2171 * it is simply dropped, otherwise, the entire data is to be committed
2172 * in one shot.
2173 */
2174void trace_buffered_event_enable(void)
2175{
2176 struct ring_buffer_event *event;
2177 struct page *page;
2178 int cpu;
51a763dd 2179
0fc1b09f
SRRH
2180 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2181
2182 if (trace_buffered_event_ref++)
2183 return;
2184
2185 for_each_tracing_cpu(cpu) {
2186 page = alloc_pages_node(cpu_to_node(cpu),
2187 GFP_KERNEL | __GFP_NORETRY, 0);
2188 if (!page)
2189 goto failed;
2190
2191 event = page_address(page);
2192 memset(event, 0, sizeof(*event));
2193
2194 per_cpu(trace_buffered_event, cpu) = event;
2195
2196 preempt_disable();
2197 if (cpu == smp_processor_id() &&
2198 this_cpu_read(trace_buffered_event) !=
2199 per_cpu(trace_buffered_event, cpu))
2200 WARN_ON_ONCE(1);
2201 preempt_enable();
51a763dd
ACM
2202 }
2203
0fc1b09f
SRRH
2204 return;
2205 failed:
2206 trace_buffered_event_disable();
2207}
2208
2209static void enable_trace_buffered_event(void *data)
2210{
2211 /* Probably not needed, but do it anyway */
2212 smp_rmb();
2213 this_cpu_dec(trace_buffered_event_cnt);
2214}
2215
2216static void disable_trace_buffered_event(void *data)
2217{
2218 this_cpu_inc(trace_buffered_event_cnt);
2219}
2220
2221/**
2222 * trace_buffered_event_disable - disable buffering events
2223 *
2224 * When a filter is removed, it is faster to not use the buffered
2225 * events, and to commit directly into the ring buffer. Free up
2226 * the temp buffers when there are no more users. This requires
2227 * special synchronization with current events.
2228 */
2229void trace_buffered_event_disable(void)
2230{
2231 int cpu;
2232
2233 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2234
2235 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2236 return;
2237
2238 if (--trace_buffered_event_ref)
2239 return;
2240
2241 preempt_disable();
2242 /* For each CPU, set the buffer as used. */
2243 smp_call_function_many(tracing_buffer_mask,
2244 disable_trace_buffered_event, NULL, 1);
2245 preempt_enable();
2246
2247 /* Wait for all current users to finish */
2248 synchronize_sched();
2249
2250 for_each_tracing_cpu(cpu) {
2251 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2252 per_cpu(trace_buffered_event, cpu) = NULL;
2253 }
2254 /*
2255 * Make sure trace_buffered_event is NULL before clearing
2256 * trace_buffered_event_cnt.
2257 */
2258 smp_wmb();
2259
2260 preempt_disable();
2261 /* Do the work on each cpu */
2262 smp_call_function_many(tracing_buffer_mask,
2263 enable_trace_buffered_event, NULL, 1);
2264 preempt_enable();
51a763dd 2265}
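/*
 * Illustrative sketch, not part of this file: the enable/disable pair is
 * reference counted and, as the WARN_ON_ONCE() checks above enforce,
 * must be called with event_mutex held.  example_set_filtering() is
 * hypothetical.
 */
static void example_set_filtering(bool filtering)
{
	mutex_lock(&event_mutex);
	if (filtering)
		trace_buffered_event_enable();
	else
		trace_buffered_event_disable();
	mutex_unlock(&event_mutex);
}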
51a763dd 2266
2c4a33ab
SRRH
2267static struct ring_buffer *temp_buffer;
2268
ccb469a1
SR
2269struct ring_buffer_event *
2270trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
7f1d2f82 2271 struct trace_event_file *trace_file,
ccb469a1
SR
2272 int type, unsigned long len,
2273 unsigned long flags, int pc)
2274{
2c4a33ab 2275 struct ring_buffer_event *entry;
0fc1b09f 2276 int val;
2c4a33ab 2277
7f1d2f82 2278 *current_rb = trace_file->tr->trace_buffer.buffer;
0fc1b09f 2279
00b41452 2280 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
0fc1b09f
SRRH
2281 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2282 (entry = this_cpu_read(trace_buffered_event))) {
2283 /* Try to use the per cpu buffer first */
2284 val = this_cpu_inc_return(trace_buffered_event_cnt);
2285 if (val == 1) {
2286 trace_event_setup(entry, type, flags, pc);
2287 entry->array[0] = len;
2288 return entry;
2289 }
2290 this_cpu_dec(trace_buffered_event_cnt);
2291 }
2292
3e9a8aad
SRRH
2293 entry = __trace_buffer_lock_reserve(*current_rb,
2294 type, len, flags, pc);
2c4a33ab
SRRH
2295 /*
2296 * If tracing is off, but we have triggers enabled
2297 * we still need to look at the event data. Use the temp_buffer
 2298 * to store the trace event for the trigger to use. It's recursion
 2299 * safe and will not be recorded anywhere.
2300 */
5d6ad960 2301 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2c4a33ab 2302 *current_rb = temp_buffer;
3e9a8aad
SRRH
2303 entry = __trace_buffer_lock_reserve(*current_rb,
2304 type, len, flags, pc);
2c4a33ab
SRRH
2305 }
2306 return entry;
ccb469a1
SR
2307}
2308EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2309
42391745
SRRH
2310static DEFINE_SPINLOCK(tracepoint_iter_lock);
2311static DEFINE_MUTEX(tracepoint_printk_mutex);
2312
2313static void output_printk(struct trace_event_buffer *fbuffer)
2314{
2315 struct trace_event_call *event_call;
2316 struct trace_event *event;
2317 unsigned long flags;
2318 struct trace_iterator *iter = tracepoint_print_iter;
2319
2320 /* We should never get here if iter is NULL */
2321 if (WARN_ON_ONCE(!iter))
2322 return;
2323
2324 event_call = fbuffer->trace_file->event_call;
2325 if (!event_call || !event_call->event.funcs ||
2326 !event_call->event.funcs->trace)
2327 return;
2328
2329 event = &fbuffer->trace_file->event_call->event;
2330
2331 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2332 trace_seq_init(&iter->seq);
2333 iter->ent = fbuffer->entry;
2334 event_call->event.funcs->trace(iter, 0, event);
2335 trace_seq_putc(&iter->seq, 0);
2336 printk("%s", iter->seq.buffer);
2337
2338 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2339}
2340
2341int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2342 void __user *buffer, size_t *lenp,
2343 loff_t *ppos)
2344{
2345 int save_tracepoint_printk;
2346 int ret;
2347
2348 mutex_lock(&tracepoint_printk_mutex);
2349 save_tracepoint_printk = tracepoint_printk;
2350
2351 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2352
2353 /*
2354 * This will force exiting early, as tracepoint_printk
 2355 * is always zero when tracepoint_print_iter is not allocated.
2356 */
2357 if (!tracepoint_print_iter)
2358 tracepoint_printk = 0;
2359
2360 if (save_tracepoint_printk == tracepoint_printk)
2361 goto out;
2362
2363 if (tracepoint_printk)
2364 static_key_enable(&tracepoint_printk_key.key);
2365 else
2366 static_key_disable(&tracepoint_printk_key.key);
2367
2368 out:
2369 mutex_unlock(&tracepoint_printk_mutex);
2370
2371 return ret;
2372}
2373
2374void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2375{
2376 if (static_key_false(&tracepoint_printk_key.key))
2377 output_printk(fbuffer);
2378
2379 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2380 fbuffer->event, fbuffer->entry,
2381 fbuffer->flags, fbuffer->pc);
2382}
2383EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2384
2ee5b92a
SRV
2385/*
2386 * Skip 3:
2387 *
2388 * trace_buffer_unlock_commit_regs()
2389 * trace_event_buffer_commit()
2390 * trace_event_raw_event_xxx()
2391*/
2392# define STACK_SKIP 3
2393
b7f0c959
SRRH
2394void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2395 struct ring_buffer *buffer,
0d5c6e1c
SR
2396 struct ring_buffer_event *event,
2397 unsigned long flags, int pc,
2398 struct pt_regs *regs)
1fd8df2c 2399{
7ffbd48d 2400 __buffer_unlock_commit(buffer, event);
1fd8df2c 2401
be54f69c 2402 /*
2ee5b92a 2403 * If regs is not set, then skip the necessary functions.
be54f69c
SRRH
2404 * Note, we can still get here via blktrace, wakeup tracer
2405 * and mmiotrace, but that's ok if they lose a function or
2ee5b92a 2406 * two. They are not that meaningful.
be54f69c 2407 */
2ee5b92a 2408 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
1fd8df2c
MH
2409 ftrace_trace_userstack(buffer, flags, pc);
2410}
1fd8df2c 2411
52ffabe3
SRRH
2412/*
2413 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2414 */
2415void
2416trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2417 struct ring_buffer_event *event)
2418{
2419 __buffer_unlock_commit(buffer, event);
2420}
2421
478409dd
CZ
2422static void
2423trace_process_export(struct trace_export *export,
2424 struct ring_buffer_event *event)
2425{
2426 struct trace_entry *entry;
2427 unsigned int size = 0;
2428
2429 entry = ring_buffer_event_data(event);
2430 size = ring_buffer_event_length(event);
a773d419 2431 export->write(export, entry, size);
478409dd
CZ
2432}
2433
2434static DEFINE_MUTEX(ftrace_export_lock);
2435
2436static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2437
2438static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2439
2440static inline void ftrace_exports_enable(void)
2441{
2442 static_branch_enable(&ftrace_exports_enabled);
2443}
2444
2445static inline void ftrace_exports_disable(void)
2446{
2447 static_branch_disable(&ftrace_exports_enabled);
2448}
2449
2450void ftrace_exports(struct ring_buffer_event *event)
2451{
2452 struct trace_export *export;
2453
2454 preempt_disable_notrace();
2455
2456 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2457 while (export) {
2458 trace_process_export(export, event);
2459 export = rcu_dereference_raw_notrace(export->next);
2460 }
2461
2462 preempt_enable_notrace();
2463}
2464
2465static inline void
2466add_trace_export(struct trace_export **list, struct trace_export *export)
2467{
2468 rcu_assign_pointer(export->next, *list);
2469 /*
2470 * We are entering export into the list but another
2471 * CPU might be walking that list. We need to make sure
2472 * the export->next pointer is valid before another CPU sees
2473 * the export pointer included into the list.
2474 */
2475 rcu_assign_pointer(*list, export);
2476}
2477
2478static inline int
2479rm_trace_export(struct trace_export **list, struct trace_export *export)
2480{
2481 struct trace_export **p;
2482
2483 for (p = list; *p != NULL; p = &(*p)->next)
2484 if (*p == export)
2485 break;
2486
2487 if (*p != export)
2488 return -1;
2489
2490 rcu_assign_pointer(*p, (*p)->next);
2491
2492 return 0;
2493}
2494
2495static inline void
2496add_ftrace_export(struct trace_export **list, struct trace_export *export)
2497{
2498 if (*list == NULL)
2499 ftrace_exports_enable();
2500
2501 add_trace_export(list, export);
2502}
2503
2504static inline int
2505rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2506{
2507 int ret;
2508
2509 ret = rm_trace_export(list, export);
2510 if (*list == NULL)
2511 ftrace_exports_disable();
2512
2513 return ret;
2514}
2515
2516int register_ftrace_export(struct trace_export *export)
2517{
2518 if (WARN_ON_ONCE(!export->write))
2519 return -1;
2520
2521 mutex_lock(&ftrace_export_lock);
2522
2523 add_ftrace_export(&ftrace_exports_list, export);
2524
2525 mutex_unlock(&ftrace_export_lock);
2526
2527 return 0;
2528}
2529EXPORT_SYMBOL_GPL(register_ftrace_export);
2530
2531int unregister_ftrace_export(struct trace_export *export)
2532{
2533 int ret;
2534
2535 mutex_lock(&ftrace_export_lock);
2536
2537 ret = rm_ftrace_export(&ftrace_exports_list, export);
2538
2539 mutex_unlock(&ftrace_export_lock);
2540
2541 return ret;
2542}
2543EXPORT_SYMBOL_GPL(unregister_ftrace_export);
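/*
 * Illustrative sketch, not part of this file: forwarding function trace
 * entries to an external sink via a trace_export (the STM ftrace bridge
 * is an in-tree user of this interface).  The write() callback is
 * invoked with preemption disabled, so it must not sleep.
 * example_write() and example_export are hypothetical.
 */
static void example_write(struct trace_export *export, const void *entry,
			  unsigned int size)
{
	/* Copy the raw trace entry out to the external device here. */
}

static struct trace_export example_export = {
	.write	= example_write,
};

static int __init example_export_init(void)
{
	return register_ftrace_export(&example_export);
}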
2544
e309b41d 2545void
7be42151 2546trace_function(struct trace_array *tr,
38697053
SR
2547 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2548 int pc)
bc0c38d1 2549{
2425bcb9 2550 struct trace_event_call *call = &event_function;
12883efb 2551 struct ring_buffer *buffer = tr->trace_buffer.buffer;
3928a8a2 2552 struct ring_buffer_event *event;
777e208d 2553 struct ftrace_entry *entry;
bc0c38d1 2554
3e9a8aad
SRRH
2555 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2556 flags, pc);
3928a8a2
SR
2557 if (!event)
2558 return;
2559 entry = ring_buffer_event_data(event);
777e208d
SR
2560 entry->ip = ip;
2561 entry->parent_ip = parent_ip;
e1112b4d 2562
478409dd
CZ
2563 if (!call_filter_check_discard(call, entry, buffer, event)) {
2564 if (static_branch_unlikely(&ftrace_exports_enabled))
2565 ftrace_exports(event);
7ffbd48d 2566 __buffer_unlock_commit(buffer, event);
478409dd 2567 }
bc0c38d1
SR
2568}
2569
c0a0d0d3 2570#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
2571
2572#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2573struct ftrace_stack {
2574 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2575};
2576
2577static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2578static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2579
e77405ad 2580static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 2581 unsigned long flags,
1fd8df2c 2582 int skip, int pc, struct pt_regs *regs)
86387f7e 2583{
2425bcb9 2584 struct trace_event_call *call = &event_kernel_stack;
3928a8a2 2585 struct ring_buffer_event *event;
777e208d 2586 struct stack_entry *entry;
86387f7e 2587 struct stack_trace trace;
4a9bd3f1
SR
2588 int use_stack;
2589 int size = FTRACE_STACK_ENTRIES;
2590
2591 trace.nr_entries = 0;
2592 trace.skip = skip;
2593
be54f69c 2594 /*
2ee5b92a 2595 * Add one, for this function and the call to save_stack_trace().
be54f69c
SRRH
2596 * If regs is set, then these functions will not be in the way.
2597 */
2ee5b92a 2598#ifndef CONFIG_UNWINDER_ORC
be54f69c 2599 if (!regs)
2ee5b92a
SRV
2600 trace.skip++;
2601#endif
be54f69c 2602
4a9bd3f1
SR
2603 /*
2604 * Since events can happen in NMIs there's no safe way to
2605 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2606 * or NMI comes in, it will just have to use the default
 2607 * FTRACE_STACK_ENTRIES.
2608 */
2609 preempt_disable_notrace();
2610
82146529 2611 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
2612 /*
2613 * We don't need any atomic variables, just a barrier.
2614 * If an interrupt comes in, we don't care, because it would
2615 * have exited and put the counter back to what we want.
2616 * We just need a barrier to keep gcc from moving things
2617 * around.
2618 */
2619 barrier();
2620 if (use_stack == 1) {
bdffd893 2621 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
2622 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2623
2624 if (regs)
2625 save_stack_trace_regs(regs, &trace);
2626 else
2627 save_stack_trace(&trace);
2628
2629 if (trace.nr_entries > size)
2630 size = trace.nr_entries;
2631 } else
2632 /* From now on, use_stack is a boolean */
2633 use_stack = 0;
2634
2635 size *= sizeof(unsigned long);
86387f7e 2636
3e9a8aad
SRRH
2637 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2638 sizeof(*entry) + size, flags, pc);
3928a8a2 2639 if (!event)
4a9bd3f1
SR
2640 goto out;
2641 entry = ring_buffer_event_data(event);
86387f7e 2642
4a9bd3f1
SR
2643 memset(&entry->caller, 0, size);
2644
2645 if (use_stack)
2646 memcpy(&entry->caller, trace.entries,
2647 trace.nr_entries * sizeof(unsigned long));
2648 else {
2649 trace.max_entries = FTRACE_STACK_ENTRIES;
2650 trace.entries = entry->caller;
2651 if (regs)
2652 save_stack_trace_regs(regs, &trace);
2653 else
2654 save_stack_trace(&trace);
2655 }
2656
2657 entry->size = trace.nr_entries;
86387f7e 2658
f306cc82 2659 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2660 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
2661
2662 out:
2663 /* Again, don't let gcc optimize things here */
2664 barrier();
82146529 2665 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
2666 preempt_enable_notrace();
2667
f0a920d5
IM
2668}
2669
2d34f489
SRRH
2670static inline void ftrace_trace_stack(struct trace_array *tr,
2671 struct ring_buffer *buffer,
73dddbb5
SRRH
2672 unsigned long flags,
2673 int skip, int pc, struct pt_regs *regs)
53614991 2674{
2d34f489 2675 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
53614991
SR
2676 return;
2677
73dddbb5 2678 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
2679}
2680
c0a0d0d3
FW
2681void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2682 int pc)
38697053 2683{
a33d7d94
SRV
2684 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2685
2686 if (rcu_is_watching()) {
2687 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2688 return;
2689 }
2690
2691 /*
2692 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2693 * but if the above rcu_is_watching() failed, then the NMI
2694 * triggered someplace critical, and rcu_irq_enter() should
2695 * not be called from NMI.
2696 */
2697 if (unlikely(in_nmi()))
2698 return;
2699
a33d7d94
SRV
2700 rcu_irq_enter_irqson();
2701 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2702 rcu_irq_exit_irqson();
38697053
SR
2703}
2704
03889384
SR
2705/**
2706 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 2707 * @skip: Number of functions to skip (helper handlers)
03889384 2708 */
c142be8e 2709void trace_dump_stack(int skip)
03889384
SR
2710{
2711 unsigned long flags;
2712
2713 if (tracing_disabled || tracing_selftest_running)
e36c5458 2714 return;
03889384
SR
2715
2716 local_save_flags(flags);
2717
2ee5b92a
SRV
2718#ifndef CONFIG_UNWINDER_ORC
2719 /* Skip 1 to skip this function. */
2720 skip++;
2721#endif
c142be8e
SRRH
2722 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2723 flags, skip, preempt_count(), NULL);
03889384
SR
2724}
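/*
 * Illustrative sketch, not part of this file: dropping a kernel back
 * trace into the trace buffer from a suspect code path.  Unlike
 * dump_stack(), the output goes to the ring buffer rather than the
 * console, so it is cheap enough to leave in hot paths while debugging.
 */
static void example_record_backtrace(void)
{
	/* Skip no extra callers beyond what the helper already skips. */
	trace_dump_stack(0);
}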
2725
91e86e56
SR
2726static DEFINE_PER_CPU(int, user_stack_count);
2727
e77405ad
SR
2728void
2729ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 2730{
2425bcb9 2731 struct trace_event_call *call = &event_user_stack;
8d7c6a96 2732 struct ring_buffer_event *event;
02b67518
TE
2733 struct userstack_entry *entry;
2734 struct stack_trace trace;
02b67518 2735
983f938a 2736 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
02b67518
TE
2737 return;
2738
b6345879
SR
2739 /*
2740 * NMIs can not handle page faults, even with fix ups.
2741 * The save user stack can (and often does) fault.
2742 */
2743 if (unlikely(in_nmi()))
2744 return;
02b67518 2745
91e86e56
SR
2746 /*
2747 * prevent recursion, since the user stack tracing may
2748 * trigger other kernel events.
2749 */
2750 preempt_disable();
2751 if (__this_cpu_read(user_stack_count))
2752 goto out;
2753
2754 __this_cpu_inc(user_stack_count);
2755
3e9a8aad
SRRH
2756 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2757 sizeof(*entry), flags, pc);
02b67518 2758 if (!event)
1dbd1951 2759 goto out_drop_count;
02b67518 2760 entry = ring_buffer_event_data(event);
02b67518 2761
48659d31 2762 entry->tgid = current->tgid;
02b67518
TE
2763 memset(&entry->caller, 0, sizeof(entry->caller));
2764
2765 trace.nr_entries = 0;
2766 trace.max_entries = FTRACE_STACK_ENTRIES;
2767 trace.skip = 0;
2768 trace.entries = entry->caller;
2769
2770 save_stack_trace_user(&trace);
f306cc82 2771 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2772 __buffer_unlock_commit(buffer, event);
91e86e56 2773
1dbd1951 2774 out_drop_count:
91e86e56 2775 __this_cpu_dec(user_stack_count);
91e86e56
SR
2776 out:
2777 preempt_enable();
02b67518
TE
2778}
2779
4fd27358
HE
2780#ifdef UNUSED
2781static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 2782{
7be42151 2783 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 2784}
4fd27358 2785#endif /* UNUSED */
02b67518 2786
c0a0d0d3
FW
2787#endif /* CONFIG_STACKTRACE */
2788
07d777fe
SR
2789/* created for use with alloc_percpu */
2790struct trace_buffer_struct {
e2ace001
AL
2791 int nesting;
2792 char buffer[4][TRACE_BUF_SIZE];
07d777fe
SR
2793};
2794
2795static struct trace_buffer_struct *trace_percpu_buffer;
07d777fe
SR
2796
2797/*
e2ace001
AL
 2798 * This allows for lockless recording. If we're nested too deeply, then
2799 * this returns NULL.
07d777fe
SR
2800 */
2801static char *get_trace_buf(void)
2802{
e2ace001 2803 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
07d777fe 2804
e2ace001 2805 if (!buffer || buffer->nesting >= 4)
07d777fe
SR
2806 return NULL;
2807
3d9622c1
SRV
2808 buffer->nesting++;
2809
2810 /* Interrupts must see nesting incremented before we use the buffer */
2811 barrier();
2812 return &buffer->buffer[buffer->nesting][0];
e2ace001
AL
2813}
2814
2815static void put_trace_buf(void)
2816{
3d9622c1
SRV
2817 /* Don't let the decrement of nesting leak before this */
2818 barrier();
e2ace001 2819 this_cpu_dec(trace_percpu_buffer->nesting);
07d777fe
SR
2820}
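/*
 * Illustrative sketch, not part of this file: the pattern the vprintk
 * helpers below follow.  Each context level (normal, softirq, irq, NMI)
 * gets its own slot, so the scratch buffer can be used without locks as
 * long as get/put are strictly paired and preemption stays disabled.
 */
static void example_use_percpu_scratch(void)
{
	char *tbuffer;

	preempt_disable_notrace();
	tbuffer = get_trace_buf();
	if (tbuffer) {
		snprintf(tbuffer, TRACE_BUF_SIZE, "scratch formatting goes here");
		/* ... hand the formatted data to the ring buffer ... */
		put_trace_buf();
	}
	preempt_enable_notrace();
}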
2821
2822static int alloc_percpu_trace_buffer(void)
2823{
2824 struct trace_buffer_struct *buffers;
07d777fe
SR
2825
2826 buffers = alloc_percpu(struct trace_buffer_struct);
e2ace001
AL
2827 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2828 return -ENOMEM;
07d777fe
SR
2829
2830 trace_percpu_buffer = buffers;
07d777fe 2831 return 0;
07d777fe
SR
2832}
2833
81698831
SR
2834static int buffers_allocated;
2835
07d777fe
SR
2836void trace_printk_init_buffers(void)
2837{
07d777fe
SR
2838 if (buffers_allocated)
2839 return;
2840
2841 if (alloc_percpu_trace_buffer())
2842 return;
2843
2184db46
SR
2844 /* trace_printk() is for debug use only. Don't use it in production. */
2845
a395d6a7
JP
2846 pr_warn("\n");
2847 pr_warn("**********************************************************\n");
2848 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2849 pr_warn("** **\n");
2850 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2851 pr_warn("** **\n");
2852 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2853 pr_warn("** unsafe for production use. **\n");
2854 pr_warn("** **\n");
2855 pr_warn("** If you see this message and you are not debugging **\n");
2856 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2857 pr_warn("** **\n");
2858 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2859 pr_warn("**********************************************************\n");
07d777fe 2860
b382ede6
SR
2861 /* Expand the buffers to set size */
2862 tracing_update_buffers();
2863
07d777fe 2864 buffers_allocated = 1;
81698831
SR
2865
2866 /*
2867 * trace_printk_init_buffers() can be called by modules.
2868 * If that happens, then we need to start cmdline recording
2869 * directly here. If the global_trace.buffer is already
2870 * allocated here, then this was called by module code.
2871 */
12883efb 2872 if (global_trace.trace_buffer.buffer)
81698831
SR
2873 tracing_start_cmdline_record();
2874}
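/*
 * Illustrative sketch, not part of this file: trace_printk() is the
 * consumer of these buffers.  It formats into the per-cpu scratch space
 * above and lands in the ring buffer, which makes it far cheaper than
 * printk() in hot paths, but as the banner says it is for debugging
 * only.  example_probe() is hypothetical.
 */
static void example_probe(int cpu, u64 delta_ns)
{
	trace_printk("cpu %d saw a delta of %llu ns\n", cpu, delta_ns);
}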
2875
2876void trace_printk_start_comm(void)
2877{
2878 /* Start tracing comms if trace printk is set */
2879 if (!buffers_allocated)
2880 return;
2881 tracing_start_cmdline_record();
2882}
2883
2884static void trace_printk_start_stop_comm(int enabled)
2885{
2886 if (!buffers_allocated)
2887 return;
2888
2889 if (enabled)
2890 tracing_start_cmdline_record();
2891 else
2892 tracing_stop_cmdline_record();
07d777fe
SR
2893}
2894
769b0441 2895/**
48ead020 2896 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
2897 *
2898 */
40ce74f1 2899int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2900{
2425bcb9 2901 struct trace_event_call *call = &event_bprint;
769b0441 2902 struct ring_buffer_event *event;
e77405ad 2903 struct ring_buffer *buffer;
769b0441 2904 struct trace_array *tr = &global_trace;
48ead020 2905 struct bprint_entry *entry;
769b0441 2906 unsigned long flags;
07d777fe
SR
2907 char *tbuffer;
2908 int len = 0, size, pc;
769b0441
FW
2909
2910 if (unlikely(tracing_selftest_running || tracing_disabled))
2911 return 0;
2912
2913 /* Don't pollute graph traces with trace_vprintk internals */
2914 pause_graph_tracing();
2915
2916 pc = preempt_count();
5168ae50 2917 preempt_disable_notrace();
769b0441 2918
07d777fe
SR
2919 tbuffer = get_trace_buf();
2920 if (!tbuffer) {
2921 len = 0;
e2ace001 2922 goto out_nobuffer;
07d777fe 2923 }
769b0441 2924
07d777fe 2925 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2926
07d777fe
SR
2927 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2928 goto out;
769b0441 2929
07d777fe 2930 local_save_flags(flags);
769b0441 2931 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2932 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
2933 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2934 flags, pc);
769b0441 2935 if (!event)
07d777fe 2936 goto out;
769b0441
FW
2937 entry = ring_buffer_event_data(event);
2938 entry->ip = ip;
769b0441
FW
2939 entry->fmt = fmt;
2940
07d777fe 2941 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2942 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2943 __buffer_unlock_commit(buffer, event);
2d34f489 2944 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
d931369b 2945 }
769b0441 2946
769b0441 2947out:
e2ace001
AL
2948 put_trace_buf();
2949
2950out_nobuffer:
5168ae50 2951 preempt_enable_notrace();
769b0441
FW
2952 unpause_graph_tracing();
2953
2954 return len;
2955}
48ead020
FW
2956EXPORT_SYMBOL_GPL(trace_vbprintk);
2957
12883efb
SRRH
2958static int
2959__trace_array_vprintk(struct ring_buffer *buffer,
2960 unsigned long ip, const char *fmt, va_list args)
48ead020 2961{
2425bcb9 2962 struct trace_event_call *call = &event_print;
48ead020 2963 struct ring_buffer_event *event;
07d777fe 2964 int len = 0, size, pc;
48ead020 2965 struct print_entry *entry;
07d777fe
SR
2966 unsigned long flags;
2967 char *tbuffer;
48ead020
FW
2968
2969 if (tracing_disabled || tracing_selftest_running)
2970 return 0;
2971
07d777fe
SR
2972 /* Don't pollute graph traces with trace_vprintk internals */
2973 pause_graph_tracing();
2974
48ead020
FW
2975 pc = preempt_count();
2976 preempt_disable_notrace();
48ead020 2977
07d777fe
SR
2978
2979 tbuffer = get_trace_buf();
2980 if (!tbuffer) {
2981 len = 0;
e2ace001 2982 goto out_nobuffer;
07d777fe 2983 }
48ead020 2984
3558a5ac 2985 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2986
07d777fe 2987 local_save_flags(flags);
48ead020 2988 size = sizeof(*entry) + len + 1;
3e9a8aad
SRRH
2989 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2990 flags, pc);
48ead020 2991 if (!event)
07d777fe 2992 goto out;
48ead020 2993 entry = ring_buffer_event_data(event);
c13d2f7c 2994 entry->ip = ip;
48ead020 2995
3558a5ac 2996 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 2997 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2998 __buffer_unlock_commit(buffer, event);
2d34f489 2999 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
d931369b 3000 }
e2ace001
AL
3001
3002out:
3003 put_trace_buf();
3004
3005out_nobuffer:
48ead020 3006 preempt_enable_notrace();
07d777fe 3007 unpause_graph_tracing();
48ead020
FW
3008
3009 return len;
3010}
659372d3 3011
12883efb
SRRH
3012int trace_array_vprintk(struct trace_array *tr,
3013 unsigned long ip, const char *fmt, va_list args)
3014{
3015 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3016}
3017
3018int trace_array_printk(struct trace_array *tr,
3019 unsigned long ip, const char *fmt, ...)
3020{
3021 int ret;
3022 va_list ap;
3023
983f938a 3024 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
3025 return 0;
3026
3027 va_start(ap, fmt);
3028 ret = trace_array_vprintk(tr, ip, fmt, ap);
3029 va_end(ap);
3030 return ret;
3031}
3032
3033int trace_array_printk_buf(struct ring_buffer *buffer,
3034 unsigned long ip, const char *fmt, ...)
3035{
3036 int ret;
3037 va_list ap;
3038
983f938a 3039 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
3040 return 0;
3041
3042 va_start(ap, fmt);
3043 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3044 va_end(ap);
3045 return ret;
3046}
3047
659372d3
SR
3048int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3049{
a813a159 3050 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 3051}
769b0441
FW
3052EXPORT_SYMBOL_GPL(trace_vprintk);
3053
e2ac8ef5 3054static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 3055{
6d158a81
SR
3056 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3057
5a90f577 3058 iter->idx++;
6d158a81
SR
3059 if (buf_iter)
3060 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
3061}
3062
e309b41d 3063static struct trace_entry *
bc21b478
SR
3064peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3065 unsigned long *lost_events)
dd0e545f 3066{
3928a8a2 3067 struct ring_buffer_event *event;
6d158a81 3068 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 3069
d769041f
SR
3070 if (buf_iter)
3071 event = ring_buffer_iter_peek(buf_iter, ts);
3072 else
12883efb 3073 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 3074 lost_events);
d769041f 3075
4a9bd3f1
SR
3076 if (event) {
3077 iter->ent_size = ring_buffer_event_length(event);
3078 return ring_buffer_event_data(event);
3079 }
3080 iter->ent_size = 0;
3081 return NULL;
dd0e545f 3082}
d769041f 3083
dd0e545f 3084static struct trace_entry *
bc21b478
SR
3085__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3086 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 3087{
12883efb 3088 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 3089 struct trace_entry *ent, *next = NULL;
aa27497c 3090 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 3091 int cpu_file = iter->cpu_file;
3928a8a2 3092 u64 next_ts = 0, ts;
bc0c38d1 3093 int next_cpu = -1;
12b5da34 3094 int next_size = 0;
bc0c38d1
SR
3095 int cpu;
3096
b04cc6b1
FW
3097 /*
 3098 * If we are in a per_cpu trace file, don't bother iterating over
 3099 * all cpus, just peek at that one cpu directly.
3100 */
ae3b5093 3101 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
3102 if (ring_buffer_empty_cpu(buffer, cpu_file))
3103 return NULL;
bc21b478 3104 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
3105 if (ent_cpu)
3106 *ent_cpu = cpu_file;
3107
3108 return ent;
3109 }
3110
ab46428c 3111 for_each_tracing_cpu(cpu) {
dd0e545f 3112
3928a8a2
SR
3113 if (ring_buffer_empty_cpu(buffer, cpu))
3114 continue;
dd0e545f 3115
bc21b478 3116 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 3117
cdd31cd2
IM
3118 /*
3119 * Pick the entry with the smallest timestamp:
3120 */
3928a8a2 3121 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
3122 next = ent;
3123 next_cpu = cpu;
3928a8a2 3124 next_ts = ts;
bc21b478 3125 next_lost = lost_events;
12b5da34 3126 next_size = iter->ent_size;
bc0c38d1
SR
3127 }
3128 }
3129
12b5da34
SR
3130 iter->ent_size = next_size;
3131
bc0c38d1
SR
3132 if (ent_cpu)
3133 *ent_cpu = next_cpu;
3134
3928a8a2
SR
3135 if (ent_ts)
3136 *ent_ts = next_ts;
3137
bc21b478
SR
3138 if (missing_events)
3139 *missing_events = next_lost;
3140
bc0c38d1
SR
3141 return next;
3142}
3143
dd0e545f 3144/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
3145struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3146 int *ent_cpu, u64 *ent_ts)
bc0c38d1 3147{
bc21b478 3148 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
3149}
3150
3151/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 3152void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 3153{
bc21b478
SR
3154 iter->ent = __find_next_entry(iter, &iter->cpu,
3155 &iter->lost_events, &iter->ts);
dd0e545f 3156
3928a8a2 3157 if (iter->ent)
e2ac8ef5 3158 trace_iterator_increment(iter);
dd0e545f 3159
3928a8a2 3160 return iter->ent ? iter : NULL;
b3806b43 3161}
bc0c38d1 3162
e309b41d 3163static void trace_consume(struct trace_iterator *iter)
b3806b43 3164{
12883efb 3165 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 3166 &iter->lost_events);
bc0c38d1
SR
3167}
3168
e309b41d 3169static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
3170{
3171 struct trace_iterator *iter = m->private;
bc0c38d1 3172 int i = (int)*pos;
4e3c3333 3173 void *ent;
bc0c38d1 3174
a63ce5b3
SR
3175 WARN_ON_ONCE(iter->leftover);
3176
bc0c38d1
SR
3177 (*pos)++;
3178
3179 /* can't go backwards */
3180 if (iter->idx > i)
3181 return NULL;
3182
3183 if (iter->idx < 0)
955b61e5 3184 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3185 else
3186 ent = iter;
3187
3188 while (ent && iter->idx < i)
955b61e5 3189 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3190
3191 iter->pos = *pos;
3192
bc0c38d1
SR
3193 return ent;
3194}
3195
955b61e5 3196void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 3197{
2f26ebd5
SR
3198 struct ring_buffer_event *event;
3199 struct ring_buffer_iter *buf_iter;
3200 unsigned long entries = 0;
3201 u64 ts;
3202
12883efb 3203 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 3204
6d158a81
SR
3205 buf_iter = trace_buffer_iter(iter, cpu);
3206 if (!buf_iter)
2f26ebd5
SR
3207 return;
3208
2f26ebd5
SR
3209 ring_buffer_iter_reset(buf_iter);
3210
3211 /*
 3212 * With the max latency tracers, a reset may never have taken place
 3213 * on a cpu. This is evident
3214 * by the timestamp being before the start of the buffer.
3215 */
3216 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 3217 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
3218 break;
3219 entries++;
3220 ring_buffer_read(buf_iter, NULL);
3221 }
3222
12883efb 3223 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
3224}
3225
d7350c3f 3226/*
d7350c3f
FW
 3227 * The current tracer is copied to avoid taking a global lock
3228 * all around.
3229 */
bc0c38d1
SR
3230static void *s_start(struct seq_file *m, loff_t *pos)
3231{
3232 struct trace_iterator *iter = m->private;
2b6080f2 3233 struct trace_array *tr = iter->tr;
b04cc6b1 3234 int cpu_file = iter->cpu_file;
bc0c38d1
SR
3235 void *p = NULL;
3236 loff_t l = 0;
3928a8a2 3237 int cpu;
bc0c38d1 3238
2fd196ec
HT
3239 /*
3240 * copy the tracer to avoid using a global lock all around.
3241 * iter->trace is a copy of current_trace, the pointer to the
3242 * name may be used instead of a strcmp(), as iter->trace->name
3243 * will point to the same string as current_trace->name.
3244 */
bc0c38d1 3245 mutex_lock(&trace_types_lock);
2b6080f2
SR
3246 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3247 *iter->trace = *tr->current_trace;
d7350c3f 3248 mutex_unlock(&trace_types_lock);
bc0c38d1 3249
12883efb 3250#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3251 if (iter->snapshot && iter->trace->use_max_tr)
3252 return ERR_PTR(-EBUSY);
12883efb 3253#endif
debdd57f
HT
3254
3255 if (!iter->snapshot)
d914ba37 3256 atomic_inc(&trace_record_taskinfo_disabled);
bc0c38d1 3257
bc0c38d1
SR
3258 if (*pos != iter->pos) {
3259 iter->ent = NULL;
3260 iter->cpu = 0;
3261 iter->idx = -1;
3262
ae3b5093 3263 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3264 for_each_tracing_cpu(cpu)
2f26ebd5 3265 tracing_iter_reset(iter, cpu);
b04cc6b1 3266 } else
2f26ebd5 3267 tracing_iter_reset(iter, cpu_file);
bc0c38d1 3268
ac91d854 3269 iter->leftover = 0;
bc0c38d1
SR
3270 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3271 ;
3272
3273 } else {
a63ce5b3
SR
3274 /*
3275 * If we overflowed the seq_file before, then we want
3276 * to just reuse the trace_seq buffer again.
3277 */
3278 if (iter->leftover)
3279 p = iter;
3280 else {
3281 l = *pos - 1;
3282 p = s_next(m, p, &l);
3283 }
bc0c38d1
SR
3284 }
3285
4f535968 3286 trace_event_read_lock();
7e53bd42 3287 trace_access_lock(cpu_file);
bc0c38d1
SR
3288 return p;
3289}
3290
3291static void s_stop(struct seq_file *m, void *p)
3292{
7e53bd42
LJ
3293 struct trace_iterator *iter = m->private;
3294
12883efb 3295#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3296 if (iter->snapshot && iter->trace->use_max_tr)
3297 return;
12883efb 3298#endif
debdd57f
HT
3299
3300 if (!iter->snapshot)
d914ba37 3301 atomic_dec(&trace_record_taskinfo_disabled);
12883efb 3302
7e53bd42 3303 trace_access_unlock(iter->cpu_file);
4f535968 3304 trace_event_read_unlock();
bc0c38d1
SR
3305}
3306
39eaf7ef 3307static void
12883efb
SRRH
3308get_total_entries(struct trace_buffer *buf,
3309 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
3310{
3311 unsigned long count;
3312 int cpu;
3313
3314 *total = 0;
3315 *entries = 0;
3316
3317 for_each_tracing_cpu(cpu) {
12883efb 3318 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
3319 /*
3320 * If this buffer has skipped entries, then we hold all
3321 * entries for the trace and we need to ignore the
3322 * ones before the time stamp.
3323 */
12883efb
SRRH
3324 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3325 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
3326 /* total is the same as the entries */
3327 *total += count;
3328 } else
3329 *total += count +
12883efb 3330 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
3331 *entries += count;
3332 }
3333}
3334
e309b41d 3335static void print_lat_help_header(struct seq_file *m)
bc0c38d1 3336{
d79ac28f
RV
3337 seq_puts(m, "# _------=> CPU# \n"
3338 "# / _-----=> irqs-off \n"
3339 "# | / _----=> need-resched \n"
3340 "# || / _---=> hardirq/softirq \n"
3341 "# ||| / _--=> preempt-depth \n"
3342 "# |||| / delay \n"
3343 "# cmd pid ||||| time | caller \n"
3344 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
3345}
3346
12883efb 3347static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 3348{
39eaf7ef
SR
3349 unsigned long total;
3350 unsigned long entries;
3351
12883efb 3352 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
3353 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3354 entries, total, num_online_cpus());
3355 seq_puts(m, "#\n");
3356}
3357
441dae8f
JF
3358static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3359 unsigned int flags)
39eaf7ef 3360{
441dae8f
JF
3361 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3362
12883efb 3363 print_event_info(buf, m);
441dae8f
JF
3364
3365 seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3366 seq_printf(m, "# | | | %s | |\n", tgid ? " | " : "");
bc0c38d1
SR
3367}
3368
441dae8f
JF
3369static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3370 unsigned int flags)
77271ce4 3371{
441dae8f 3372 bool tgid = flags & TRACE_ITER_RECORD_TGID;
b11fb737
SRV
3373 const char tgid_space[] = " ";
3374 const char space[] = " ";
3375
3376 seq_printf(m, "# %s _-----=> irqs-off\n",
3377 tgid ? tgid_space : space);
3378 seq_printf(m, "# %s / _----=> need-resched\n",
3379 tgid ? tgid_space : space);
3380 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3381 tgid ? tgid_space : space);
3382 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3383 tgid ? tgid_space : space);
3384 seq_printf(m, "# %s||| / delay\n",
3385 tgid ? tgid_space : space);
3386 seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n",
3387 tgid ? " TGID " : space);
3388 seq_printf(m, "# | | | %s|||| | |\n",
3389 tgid ? " | " : space);
77271ce4 3390}
bc0c38d1 3391
62b915f1 3392void
bc0c38d1
SR
3393print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3394{
983f938a 3395 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
3396 struct trace_buffer *buf = iter->trace_buffer;
3397 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 3398 struct tracer *type = iter->trace;
39eaf7ef
SR
3399 unsigned long entries;
3400 unsigned long total;
bc0c38d1
SR
3401 const char *name = "preemption";
3402
d840f718 3403 name = type->name;
bc0c38d1 3404
12883efb 3405 get_total_entries(buf, &total, &entries);
bc0c38d1 3406
888b55dc 3407 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 3408 name, UTS_RELEASE);
888b55dc 3409 seq_puts(m, "# -----------------------------------"
bc0c38d1 3410 "---------------------------------\n");
888b55dc 3411 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 3412 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 3413 nsecs_to_usecs(data->saved_latency),
bc0c38d1 3414 entries,
4c11d7ae 3415 total,
12883efb 3416 buf->cpu,
bc0c38d1
SR
3417#if defined(CONFIG_PREEMPT_NONE)
3418 "server",
3419#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3420 "desktop",
b5c21b45 3421#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
3422 "preempt",
3423#else
3424 "unknown",
3425#endif
3426 /* These are reserved for later use */
3427 0, 0, 0, 0);
3428#ifdef CONFIG_SMP
3429 seq_printf(m, " #P:%d)\n", num_online_cpus());
3430#else
3431 seq_puts(m, ")\n");
3432#endif
888b55dc
KM
3433 seq_puts(m, "# -----------------\n");
3434 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 3435 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
3436 data->comm, data->pid,
3437 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 3438 data->policy, data->rt_priority);
888b55dc 3439 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
3440
3441 if (data->critical_start) {
888b55dc 3442 seq_puts(m, "# => started at: ");
214023c3
SR
3443 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3444 trace_print_seq(m, &iter->seq);
888b55dc 3445 seq_puts(m, "\n# => ended at: ");
214023c3
SR
3446 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3447 trace_print_seq(m, &iter->seq);
8248ac05 3448 seq_puts(m, "\n#\n");
bc0c38d1
SR
3449 }
3450
888b55dc 3451 seq_puts(m, "#\n");
bc0c38d1
SR
3452}
3453
a309720c
SR
3454static void test_cpu_buff_start(struct trace_iterator *iter)
3455{
3456 struct trace_seq *s = &iter->seq;
983f938a 3457 struct trace_array *tr = iter->tr;
a309720c 3458
983f938a 3459 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
12ef7d44
SR
3460 return;
3461
3462 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3463 return;
3464
4dbbe2d8
MK
3465 if (cpumask_available(iter->started) &&
3466 cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
3467 return;
3468
12883efb 3469 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
3470 return;
3471
4dbbe2d8 3472 if (cpumask_available(iter->started))
919cd979 3473 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
3474
3475 /* Don't print started cpu buffer for the first entry of the trace */
3476 if (iter->idx > 1)
3477 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3478 iter->cpu);
a309720c
SR
3479}
3480
2c4f035f 3481static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 3482{
983f938a 3483 struct trace_array *tr = iter->tr;
214023c3 3484 struct trace_seq *s = &iter->seq;
983f938a 3485 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 3486 struct trace_entry *entry;
f633cef0 3487 struct trace_event *event;
bc0c38d1 3488
4e3c3333 3489 entry = iter->ent;
dd0e545f 3490
a309720c
SR
3491 test_cpu_buff_start(iter);
3492
c4a8e8be 3493 event = ftrace_find_event(entry->type);
bc0c38d1 3494
983f938a 3495 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3496 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3497 trace_print_lat_context(iter);
3498 else
3499 trace_print_context(iter);
c4a8e8be 3500 }
bc0c38d1 3501
19a7fe20
SRRH
3502 if (trace_seq_has_overflowed(s))
3503 return TRACE_TYPE_PARTIAL_LINE;
3504
268ccda0 3505 if (event)
a9a57763 3506 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 3507
19a7fe20 3508 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 3509
19a7fe20 3510 return trace_handle_return(s);
bc0c38d1
SR
3511}
3512
2c4f035f 3513static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3 3514{
983f938a 3515 struct trace_array *tr = iter->tr;
f9896bf3
IM
3516 struct trace_seq *s = &iter->seq;
3517 struct trace_entry *entry;
f633cef0 3518 struct trace_event *event;
f9896bf3
IM
3519
3520 entry = iter->ent;
dd0e545f 3521
983f938a 3522 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
19a7fe20
SRRH
3523 trace_seq_printf(s, "%d %d %llu ",
3524 entry->pid, iter->cpu, iter->ts);
3525
3526 if (trace_seq_has_overflowed(s))
3527 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 3528
f633cef0 3529 event = ftrace_find_event(entry->type);
268ccda0 3530 if (event)
a9a57763 3531 return event->funcs->raw(iter, 0, event);
d9793bd8 3532
19a7fe20 3533 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 3534
19a7fe20 3535 return trace_handle_return(s);
f9896bf3
IM
3536}
3537
2c4f035f 3538static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec 3539{
983f938a 3540 struct trace_array *tr = iter->tr;
5e3ca0ec
IM
3541 struct trace_seq *s = &iter->seq;
3542 unsigned char newline = '\n';
3543 struct trace_entry *entry;
f633cef0 3544 struct trace_event *event;
5e3ca0ec
IM
3545
3546 entry = iter->ent;
dd0e545f 3547
983f938a 3548 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3549 SEQ_PUT_HEX_FIELD(s, entry->pid);
3550 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3551 SEQ_PUT_HEX_FIELD(s, iter->ts);
3552 if (trace_seq_has_overflowed(s))
3553 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3554 }
5e3ca0ec 3555
f633cef0 3556 event = ftrace_find_event(entry->type);
268ccda0 3557 if (event) {
a9a57763 3558 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
3559 if (ret != TRACE_TYPE_HANDLED)
3560 return ret;
3561 }
7104f300 3562
19a7fe20 3563 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 3564
19a7fe20 3565 return trace_handle_return(s);
5e3ca0ec
IM
3566}
3567
2c4f035f 3568static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa 3569{
983f938a 3570 struct trace_array *tr = iter->tr;
cb0f12aa
IM
3571 struct trace_seq *s = &iter->seq;
3572 struct trace_entry *entry;
f633cef0 3573 struct trace_event *event;
cb0f12aa
IM
3574
3575 entry = iter->ent;
dd0e545f 3576
983f938a 3577 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3578 SEQ_PUT_FIELD(s, entry->pid);
3579 SEQ_PUT_FIELD(s, iter->cpu);
3580 SEQ_PUT_FIELD(s, iter->ts);
3581 if (trace_seq_has_overflowed(s))
3582 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3583 }
cb0f12aa 3584
f633cef0 3585 event = ftrace_find_event(entry->type);
a9a57763
SR
3586 return event ? event->funcs->binary(iter, 0, event) :
3587 TRACE_TYPE_HANDLED;
cb0f12aa
IM
3588}
3589
62b915f1 3590int trace_empty(struct trace_iterator *iter)
bc0c38d1 3591{
6d158a81 3592 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
3593 int cpu;
3594
9aba60fe 3595 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 3596 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 3597 cpu = iter->cpu_file;
6d158a81
SR
3598 buf_iter = trace_buffer_iter(iter, cpu);
3599 if (buf_iter) {
3600 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
3601 return 0;
3602 } else {
12883efb 3603 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
3604 return 0;
3605 }
3606 return 1;
3607 }
3608
ab46428c 3609 for_each_tracing_cpu(cpu) {
6d158a81
SR
3610 buf_iter = trace_buffer_iter(iter, cpu);
3611 if (buf_iter) {
3612 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
3613 return 0;
3614 } else {
12883efb 3615 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
3616 return 0;
3617 }
bc0c38d1 3618 }
d769041f 3619
797d3712 3620 return 1;
bc0c38d1
SR
3621}
3622
4f535968 3623/* Called with trace_event_read_lock() held. */
955b61e5 3624enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 3625{
983f938a
SRRH
3626 struct trace_array *tr = iter->tr;
3627 unsigned long trace_flags = tr->trace_flags;
2c4f035f
FW
3628 enum print_line_t ret;
3629
19a7fe20
SRRH
3630 if (iter->lost_events) {
3631 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3632 iter->cpu, iter->lost_events);
3633 if (trace_seq_has_overflowed(&iter->seq))
3634 return TRACE_TYPE_PARTIAL_LINE;
3635 }
bc21b478 3636
2c4f035f
FW
3637 if (iter->trace && iter->trace->print_line) {
3638 ret = iter->trace->print_line(iter);
3639 if (ret != TRACE_TYPE_UNHANDLED)
3640 return ret;
3641 }
72829bc3 3642
09ae7234
SRRH
3643 if (iter->ent->type == TRACE_BPUTS &&
3644 trace_flags & TRACE_ITER_PRINTK &&
3645 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3646 return trace_print_bputs_msg_only(iter);
3647
48ead020
FW
3648 if (iter->ent->type == TRACE_BPRINT &&
3649 trace_flags & TRACE_ITER_PRINTK &&
3650 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3651 return trace_print_bprintk_msg_only(iter);
48ead020 3652
66896a85
FW
3653 if (iter->ent->type == TRACE_PRINT &&
3654 trace_flags & TRACE_ITER_PRINTK &&
3655 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3656 return trace_print_printk_msg_only(iter);
66896a85 3657
cb0f12aa
IM
3658 if (trace_flags & TRACE_ITER_BIN)
3659 return print_bin_fmt(iter);
3660
5e3ca0ec
IM
3661 if (trace_flags & TRACE_ITER_HEX)
3662 return print_hex_fmt(iter);
3663
f9896bf3
IM
3664 if (trace_flags & TRACE_ITER_RAW)
3665 return print_raw_fmt(iter);
3666
f9896bf3
IM
3667 return print_trace_fmt(iter);
3668}
3669
7e9a49ef
JO
3670void trace_latency_header(struct seq_file *m)
3671{
3672 struct trace_iterator *iter = m->private;
983f938a 3673 struct trace_array *tr = iter->tr;
7e9a49ef
JO
3674
3675 /* print nothing if the buffers are empty */
3676 if (trace_empty(iter))
3677 return;
3678
3679 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3680 print_trace_header(m, iter);
3681
983f938a 3682 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
7e9a49ef
JO
3683 print_lat_help_header(m);
3684}
3685
62b915f1
JO
3686void trace_default_header(struct seq_file *m)
3687{
3688 struct trace_iterator *iter = m->private;
983f938a
SRRH
3689 struct trace_array *tr = iter->tr;
3690 unsigned long trace_flags = tr->trace_flags;
62b915f1 3691
f56e7f8e
JO
3692 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3693 return;
3694
62b915f1
JO
3695 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3696 /* print nothing if the buffers are empty */
3697 if (trace_empty(iter))
3698 return;
3699 print_trace_header(m, iter);
3700 if (!(trace_flags & TRACE_ITER_VERBOSE))
3701 print_lat_help_header(m);
3702 } else {
77271ce4
SR
3703 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3704 if (trace_flags & TRACE_ITER_IRQ_INFO)
441dae8f
JF
3705 print_func_help_header_irq(iter->trace_buffer,
3706 m, trace_flags);
77271ce4 3707 else
441dae8f
JF
3708 print_func_help_header(iter->trace_buffer, m,
3709 trace_flags);
77271ce4 3710 }
62b915f1
JO
3711 }
3712}
3713
e0a413f6
SR
3714static void test_ftrace_alive(struct seq_file *m)
3715{
3716 if (!ftrace_is_dead())
3717 return;
d79ac28f
RV
3718 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3719 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
3720}
3721
d8741e2e 3722#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 3723static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 3724{
d79ac28f
RV
3725 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3726 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3727 "# Takes a snapshot of the main buffer.\n"
3728 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
 3729	 "#                      (Doesn't have to be '2'; works with any number that\n"
3730 "# is not a '0' or '1')\n");
d8741e2e 3731}
f1affcaa
SRRH
3732
3733static void show_snapshot_percpu_help(struct seq_file *m)
3734{
fa6f0cc7 3735 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 3736#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
3737 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3738 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 3739#else
d79ac28f
RV
3740 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3741 "# Must use main snapshot file to allocate.\n");
f1affcaa 3742#endif
d79ac28f
RV
3743 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
 3744	 "#                      (Doesn't have to be '2'; works with any number that\n"
3745 "# is not a '0' or '1')\n");
f1affcaa
SRRH
3746}
3747
d8741e2e
SRRH
3748static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3749{
45ad21ca 3750 if (iter->tr->allocated_snapshot)
fa6f0cc7 3751 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 3752 else
fa6f0cc7 3753 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 3754
fa6f0cc7 3755 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
3756 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3757 show_snapshot_main_help(m);
3758 else
3759 show_snapshot_percpu_help(m);
d8741e2e
SRRH
3760}
3761#else
3762/* Should never be called */
3763static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3764#endif
3765
bc0c38d1
SR
3766static int s_show(struct seq_file *m, void *v)
3767{
3768 struct trace_iterator *iter = v;
a63ce5b3 3769 int ret;
bc0c38d1
SR
3770
3771 if (iter->ent == NULL) {
3772 if (iter->tr) {
3773 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3774 seq_puts(m, "#\n");
e0a413f6 3775 test_ftrace_alive(m);
bc0c38d1 3776 }
d8741e2e
SRRH
3777 if (iter->snapshot && trace_empty(iter))
3778 print_snapshot_help(m, iter);
3779 else if (iter->trace && iter->trace->print_header)
8bba1bf5 3780 iter->trace->print_header(m);
62b915f1
JO
3781 else
3782 trace_default_header(m);
3783
a63ce5b3
SR
3784 } else if (iter->leftover) {
3785 /*
3786 * If we filled the seq_file buffer earlier, we
3787 * want to just show it now.
3788 */
3789 ret = trace_print_seq(m, &iter->seq);
3790
3791 /* ret should this time be zero, but you never know */
3792 iter->leftover = ret;
3793
bc0c38d1 3794 } else {
f9896bf3 3795 print_trace_line(iter);
a63ce5b3
SR
3796 ret = trace_print_seq(m, &iter->seq);
3797 /*
3798 * If we overflow the seq_file buffer, then it will
3799 * ask us for this data again at start up.
3800 * Use that instead.
3801 * ret is 0 if seq_file write succeeded.
3802 * -1 otherwise.
3803 */
3804 iter->leftover = ret;
bc0c38d1
SR
3805 }
3806
3807 return 0;
3808}
3809
649e9c70
ON
3810/*
 3811 * Should be used after trace_array_get(); trace_types_lock
3812 * ensures that i_cdev was already initialized.
3813 */
3814static inline int tracing_get_cpu(struct inode *inode)
3815{
3816 if (inode->i_cdev) /* See trace_create_cpu_file() */
3817 return (long)inode->i_cdev - 1;
3818 return RING_BUFFER_ALL_CPUS;
3819}
3820
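The decode above relies on the convention used when the per-CPU trace files are created: the CPU number is stored in i_cdev biased by one, so a NULL i_cdev (zero) still decodes to RING_BUFFER_ALL_CPUS. A minimal sketch of the encode side follows; the helper name is hypothetical and shown for illustration only.

/* Hypothetical inverse of tracing_get_cpu(): store cpu + 1 so that an
 * unset i_cdev (NULL) keeps meaning "trace all CPUs". */
static inline void tracing_set_cpu(struct inode *inode, long cpu)
{
	inode->i_cdev = (void *)(cpu + 1);
}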
88e9d34c 3821static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3822 .start = s_start,
3823 .next = s_next,
3824 .stop = s_stop,
3825 .show = s_show,
bc0c38d1
SR
3826};
3827
e309b41d 3828static struct trace_iterator *
6484c71c 3829__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3830{
6484c71c 3831 struct trace_array *tr = inode->i_private;
bc0c38d1 3832 struct trace_iterator *iter;
50e18b94 3833 int cpu;
bc0c38d1 3834
85a2f9b4
SR
3835 if (tracing_disabled)
3836 return ERR_PTR(-ENODEV);
60a11774 3837
50e18b94 3838 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3839 if (!iter)
3840 return ERR_PTR(-ENOMEM);
bc0c38d1 3841
72917235 3842 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 3843 GFP_KERNEL);
93574fcc
DC
3844 if (!iter->buffer_iter)
3845 goto release;
3846
d7350c3f
FW
3847 /*
3848 * We make a copy of the current tracer to avoid concurrent
3849 * changes on it while we are reading.
3850 */
bc0c38d1 3851 mutex_lock(&trace_types_lock);
d7350c3f 3852 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3853 if (!iter->trace)
d7350c3f 3854 goto fail;
85a2f9b4 3855
2b6080f2 3856 *iter->trace = *tr->current_trace;
d7350c3f 3857
79f55997 3858 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3859 goto fail;
3860
12883efb
SRRH
3861 iter->tr = tr;
3862
3863#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3864 /* Currently only the top directory has a snapshot */
3865 if (tr->current_trace->print_max || snapshot)
12883efb 3866 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3867 else
12883efb
SRRH
3868#endif
3869 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3870 iter->snapshot = snapshot;
bc0c38d1 3871 iter->pos = -1;
6484c71c 3872 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3873 mutex_init(&iter->mutex);
bc0c38d1 3874
8bba1bf5
MM
3875 /* Notify the tracer early; before we stop tracing. */
3876 if (iter->trace && iter->trace->open)
a93751ca 3877 iter->trace->open(iter);
8bba1bf5 3878
12ef7d44 3879 /* Annotate start of buffers if we had overruns */
12883efb 3880 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3881 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3882
8be0709f 3883 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3884 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3885 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3886
debdd57f
HT
3887 /* stop the trace while dumping if we are not opening "snapshot" */
3888 if (!iter->snapshot)
2b6080f2 3889 tracing_stop_tr(tr);
2f26ebd5 3890
ae3b5093 3891 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3892 for_each_tracing_cpu(cpu) {
b04cc6b1 3893 iter->buffer_iter[cpu] =
12883efb 3894 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3895 }
3896 ring_buffer_read_prepare_sync();
3897 for_each_tracing_cpu(cpu) {
3898 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3899 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3900 }
3901 } else {
3902 cpu = iter->cpu_file;
3928a8a2 3903 iter->buffer_iter[cpu] =
12883efb 3904 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3905 ring_buffer_read_prepare_sync();
3906 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3907 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3908 }
3909
bc0c38d1
SR
3910 mutex_unlock(&trace_types_lock);
3911
bc0c38d1 3912 return iter;
3928a8a2 3913
d7350c3f 3914 fail:
3928a8a2 3915 mutex_unlock(&trace_types_lock);
d7350c3f 3916 kfree(iter->trace);
6d158a81 3917 kfree(iter->buffer_iter);
93574fcc 3918release:
50e18b94
JO
3919 seq_release_private(inode, file);
3920 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3921}
3922
3923int tracing_open_generic(struct inode *inode, struct file *filp)
3924{
60a11774
SR
3925 if (tracing_disabled)
3926 return -ENODEV;
3927
bc0c38d1
SR
3928 filp->private_data = inode->i_private;
3929 return 0;
3930}
3931
2e86421d
GB
3932bool tracing_is_disabled(void)
3933{
3934 return (tracing_disabled) ? true: false;
3935}
3936
7b85af63
SRRH
3937/*
3938 * Open and update trace_array ref count.
3939 * Must have the current trace_array passed to it.
3940 */
dcc30223 3941static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3942{
3943 struct trace_array *tr = inode->i_private;
3944
3945 if (tracing_disabled)
3946 return -ENODEV;
3947
3948 if (trace_array_get(tr) < 0)
3949 return -ENODEV;
3950
3951 filp->private_data = inode->i_private;
3952
3953 return 0;
7b85af63
SRRH
3954}
3955
4fd27358 3956static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3957{
6484c71c 3958 struct trace_array *tr = inode->i_private;
907f2784 3959 struct seq_file *m = file->private_data;
4acd4d00 3960 struct trace_iterator *iter;
3928a8a2 3961 int cpu;
bc0c38d1 3962
ff451961 3963 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3964 trace_array_put(tr);
4acd4d00 3965 return 0;
ff451961 3966 }
4acd4d00 3967
6484c71c 3968 /* Writes do not use seq_file */
4acd4d00 3969 iter = m->private;
bc0c38d1 3970 mutex_lock(&trace_types_lock);
a695cb58 3971
3928a8a2
SR
3972 for_each_tracing_cpu(cpu) {
3973 if (iter->buffer_iter[cpu])
3974 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3975 }
3976
bc0c38d1
SR
3977 if (iter->trace && iter->trace->close)
3978 iter->trace->close(iter);
3979
debdd57f
HT
3980 if (!iter->snapshot)
3981 /* reenable tracing if it was previously enabled */
2b6080f2 3982 tracing_start_tr(tr);
f77d09a3
AL
3983
3984 __trace_array_put(tr);
3985
bc0c38d1
SR
3986 mutex_unlock(&trace_types_lock);
3987
d7350c3f 3988 mutex_destroy(&iter->mutex);
b0dfa978 3989 free_cpumask_var(iter->started);
d7350c3f 3990 kfree(iter->trace);
6d158a81 3991 kfree(iter->buffer_iter);
50e18b94 3992 seq_release_private(inode, file);
ff451961 3993
bc0c38d1
SR
3994 return 0;
3995}
3996
7b85af63
SRRH
3997static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3998{
3999 struct trace_array *tr = inode->i_private;
4000
4001 trace_array_put(tr);
bc0c38d1
SR
4002 return 0;
4003}
4004
7b85af63
SRRH
4005static int tracing_single_release_tr(struct inode *inode, struct file *file)
4006{
4007 struct trace_array *tr = inode->i_private;
4008
4009 trace_array_put(tr);
4010
4011 return single_release(inode, file);
4012}
4013
bc0c38d1
SR
4014static int tracing_open(struct inode *inode, struct file *file)
4015{
6484c71c 4016 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
4017 struct trace_iterator *iter;
4018 int ret = 0;
bc0c38d1 4019
ff451961
SRRH
4020 if (trace_array_get(tr) < 0)
4021 return -ENODEV;
4022
4acd4d00 4023 /* If this file was open for write, then erase contents */
6484c71c
ON
4024 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4025 int cpu = tracing_get_cpu(inode);
8dd33bcb
BY
4026 struct trace_buffer *trace_buf = &tr->trace_buffer;
4027
4028#ifdef CONFIG_TRACER_MAX_TRACE
4029 if (tr->current_trace->print_max)
4030 trace_buf = &tr->max_buffer;
4031#endif
6484c71c
ON
4032
4033 if (cpu == RING_BUFFER_ALL_CPUS)
8dd33bcb 4034 tracing_reset_online_cpus(trace_buf);
4acd4d00 4035 else
8dd33bcb 4036 tracing_reset(trace_buf, cpu);
4acd4d00 4037 }
bc0c38d1 4038
4acd4d00 4039 if (file->f_mode & FMODE_READ) {
6484c71c 4040 iter = __tracing_open(inode, file, false);
4acd4d00
SR
4041 if (IS_ERR(iter))
4042 ret = PTR_ERR(iter);
983f938a 4043 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4acd4d00
SR
4044 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4045 }
ff451961
SRRH
4046
4047 if (ret < 0)
4048 trace_array_put(tr);
4049
bc0c38d1
SR
4050 return ret;
4051}
4052
607e2ea1
SRRH
4053/*
4054 * Some tracers are not suitable for instance buffers.
4055 * A tracer is always available for the global array (toplevel)
4056 * or if it explicitly states that it is.
4057 */
4058static bool
4059trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4060{
4061 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4062}
4063
4064/* Find the next tracer that this trace array may use */
4065static struct tracer *
4066get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4067{
4068 while (t && !trace_ok_for_array(t, tr))
4069 t = t->next;
4070
4071 return t;
4072}
4073
e309b41d 4074static void *
bc0c38d1
SR
4075t_next(struct seq_file *m, void *v, loff_t *pos)
4076{
607e2ea1 4077 struct trace_array *tr = m->private;
f129e965 4078 struct tracer *t = v;
bc0c38d1
SR
4079
4080 (*pos)++;
4081
4082 if (t)
607e2ea1 4083 t = get_tracer_for_array(tr, t->next);
bc0c38d1 4084
bc0c38d1
SR
4085 return t;
4086}
4087
4088static void *t_start(struct seq_file *m, loff_t *pos)
4089{
607e2ea1 4090 struct trace_array *tr = m->private;
f129e965 4091 struct tracer *t;
bc0c38d1
SR
4092 loff_t l = 0;
4093
4094 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
4095
4096 t = get_tracer_for_array(tr, trace_types);
4097 for (; t && l < *pos; t = t_next(m, t, &l))
4098 ;
bc0c38d1
SR
4099
4100 return t;
4101}
4102
4103static void t_stop(struct seq_file *m, void *p)
4104{
4105 mutex_unlock(&trace_types_lock);
4106}
4107
4108static int t_show(struct seq_file *m, void *v)
4109{
4110 struct tracer *t = v;
4111
4112 if (!t)
4113 return 0;
4114
fa6f0cc7 4115 seq_puts(m, t->name);
bc0c38d1
SR
4116 if (t->next)
4117 seq_putc(m, ' ');
4118 else
4119 seq_putc(m, '\n');
4120
4121 return 0;
4122}
4123
88e9d34c 4124static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
4125 .start = t_start,
4126 .next = t_next,
4127 .stop = t_stop,
4128 .show = t_show,
bc0c38d1
SR
4129};
4130
4131static int show_traces_open(struct inode *inode, struct file *file)
4132{
607e2ea1
SRRH
4133 struct trace_array *tr = inode->i_private;
4134 struct seq_file *m;
4135 int ret;
4136
60a11774
SR
4137 if (tracing_disabled)
4138 return -ENODEV;
4139
607e2ea1
SRRH
4140 ret = seq_open(file, &show_traces_seq_ops);
4141 if (ret)
4142 return ret;
4143
4144 m = file->private_data;
4145 m->private = tr;
4146
4147 return 0;
bc0c38d1
SR
4148}
4149
4acd4d00
SR
4150static ssize_t
4151tracing_write_stub(struct file *filp, const char __user *ubuf,
4152 size_t count, loff_t *ppos)
4153{
4154 return count;
4155}
4156
098c879e 4157loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 4158{
098c879e
SRRH
4159 int ret;
4160
364829b1 4161 if (file->f_mode & FMODE_READ)
098c879e 4162 ret = seq_lseek(file, offset, whence);
364829b1 4163 else
098c879e
SRRH
4164 file->f_pos = ret = 0;
4165
4166 return ret;
364829b1
SP
4167}
4168
5e2336a0 4169static const struct file_operations tracing_fops = {
4bf39a94
IM
4170 .open = tracing_open,
4171 .read = seq_read,
4acd4d00 4172 .write = tracing_write_stub,
098c879e 4173 .llseek = tracing_lseek,
4bf39a94 4174 .release = tracing_release,
bc0c38d1
SR
4175};
4176
5e2336a0 4177static const struct file_operations show_traces_fops = {
c7078de1
IM
4178 .open = show_traces_open,
4179 .read = seq_read,
4180 .release = seq_release,
b444786f 4181 .llseek = seq_lseek,
c7078de1
IM
4182};
4183
4184static ssize_t
4185tracing_cpumask_read(struct file *filp, char __user *ubuf,
4186 size_t count, loff_t *ppos)
4187{
ccfe9e42 4188 struct trace_array *tr = file_inode(filp)->i_private;
90e406f9 4189 char *mask_str;
36dfe925 4190 int len;
c7078de1 4191
90e406f9
CD
4192 len = snprintf(NULL, 0, "%*pb\n",
4193 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4194 mask_str = kmalloc(len, GFP_KERNEL);
4195 if (!mask_str)
4196 return -ENOMEM;
36dfe925 4197
90e406f9 4198 len = snprintf(mask_str, len, "%*pb\n",
1a40243b
TH
4199 cpumask_pr_args(tr->tracing_cpumask));
4200 if (len >= count) {
36dfe925
IM
4201 count = -EINVAL;
4202 goto out_err;
4203 }
90e406f9 4204 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
36dfe925
IM
4205
4206out_err:
90e406f9 4207 kfree(mask_str);
c7078de1
IM
4208
4209 return count;
4210}
4211
4212static ssize_t
4213tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4214 size_t count, loff_t *ppos)
4215{
ccfe9e42 4216 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 4217 cpumask_var_t tracing_cpumask_new;
2b6080f2 4218 int err, cpu;
9e01c1b7
RR
4219
4220 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4221 return -ENOMEM;
c7078de1 4222
9e01c1b7 4223 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 4224 if (err)
36dfe925
IM
4225 goto err_unlock;
4226
a5e25883 4227 local_irq_disable();
0b9b12c1 4228 arch_spin_lock(&tr->max_lock);
ab46428c 4229 for_each_tracing_cpu(cpu) {
36dfe925
IM
4230 /*
4231 * Increase/decrease the disabled counter if we are
4232 * about to flip a bit in the cpumask:
4233 */
ccfe9e42 4234 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4235 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4236 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4237 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 4238 }
ccfe9e42 4239 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4240 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4241 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4242 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
4243 }
4244 }
0b9b12c1 4245 arch_spin_unlock(&tr->max_lock);
a5e25883 4246 local_irq_enable();
36dfe925 4247
ccfe9e42 4248 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
9e01c1b7 4249 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
4250
4251 return count;
36dfe925
IM
4252
4253err_unlock:
215368e8 4254 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
4255
4256 return err;
c7078de1
IM
4257}
4258
5e2336a0 4259static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 4260 .open = tracing_open_generic_tr,
c7078de1
IM
4261 .read = tracing_cpumask_read,
4262 .write = tracing_cpumask_write,
ccfe9e42 4263 .release = tracing_release_generic_tr,
b444786f 4264 .llseek = generic_file_llseek,
bc0c38d1
SR
4265};
4266
fdb372ed 4267static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 4268{
d8e83d26 4269 struct tracer_opt *trace_opts;
2b6080f2 4270 struct trace_array *tr = m->private;
d8e83d26 4271 u32 tracer_flags;
d8e83d26 4272 int i;
adf9f195 4273
d8e83d26 4274 mutex_lock(&trace_types_lock);
2b6080f2
SR
4275 tracer_flags = tr->current_trace->flags->val;
4276 trace_opts = tr->current_trace->flags->opts;
d8e83d26 4277
bc0c38d1 4278 for (i = 0; trace_options[i]; i++) {
983f938a 4279 if (tr->trace_flags & (1 << i))
fdb372ed 4280 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 4281 else
fdb372ed 4282 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
4283 }
4284
adf9f195
FW
4285 for (i = 0; trace_opts[i].name; i++) {
4286 if (tracer_flags & trace_opts[i].bit)
fdb372ed 4287 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 4288 else
fdb372ed 4289 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 4290 }
d8e83d26 4291 mutex_unlock(&trace_types_lock);
adf9f195 4292
fdb372ed 4293 return 0;
bc0c38d1 4294}
bc0c38d1 4295
8c1a49ae 4296static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
4297 struct tracer_flags *tracer_flags,
4298 struct tracer_opt *opts, int neg)
4299{
d39cdd20 4300 struct tracer *trace = tracer_flags->trace;
8d18eaaf 4301 int ret;
bc0c38d1 4302
8c1a49ae 4303 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
4304 if (ret)
4305 return ret;
4306
4307 if (neg)
4308 tracer_flags->val &= ~opts->bit;
4309 else
4310 tracer_flags->val |= opts->bit;
4311 return 0;
bc0c38d1
SR
4312}
4313
adf9f195 4314/* Try to assign a tracer specific option */
8c1a49ae 4315static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 4316{
8c1a49ae 4317 struct tracer *trace = tr->current_trace;
7770841e 4318 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 4319 struct tracer_opt *opts = NULL;
8d18eaaf 4320 int i;
adf9f195 4321
7770841e
Z
4322 for (i = 0; tracer_flags->opts[i].name; i++) {
4323 opts = &tracer_flags->opts[i];
adf9f195 4324
8d18eaaf 4325 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 4326 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 4327 }
adf9f195 4328
8d18eaaf 4329 return -EINVAL;
adf9f195
FW
4330}
4331
613f04a0
SRRH
4332/* Some tracers require overwrite to stay enabled */
4333int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4334{
4335 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4336 return -1;
4337
4338 return 0;
4339}
4340
2b6080f2 4341int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
4342{
4343 /* do nothing if flag is already set */
983f938a 4344 if (!!(tr->trace_flags & mask) == !!enabled)
613f04a0
SRRH
4345 return 0;
4346
4347 /* Give the tracer a chance to approve the change */
2b6080f2 4348 if (tr->current_trace->flag_changed)
bf6065b5 4349 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 4350 return -EINVAL;
af4617bd
SR
4351
4352 if (enabled)
983f938a 4353 tr->trace_flags |= mask;
af4617bd 4354 else
983f938a 4355 tr->trace_flags &= ~mask;
e870e9a1
LZ
4356
4357 if (mask == TRACE_ITER_RECORD_CMD)
4358 trace_event_enable_cmd_record(enabled);
750912fa 4359
d914ba37
JF
4360 if (mask == TRACE_ITER_RECORD_TGID) {
4361 if (!tgid_map)
4362 tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map),
4363 GFP_KERNEL);
4364 if (!tgid_map) {
4365 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4366 return -ENOMEM;
4367 }
4368
4369 trace_event_enable_tgid_record(enabled);
4370 }
4371
c37775d5
SR
4372 if (mask == TRACE_ITER_EVENT_FORK)
4373 trace_event_follow_fork(tr, enabled);
4374
1e10486f
NK
4375 if (mask == TRACE_ITER_FUNC_FORK)
4376 ftrace_pid_follow_fork(tr, enabled);
4377
80902822 4378 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 4379 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 4380#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 4381 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
4382#endif
4383 }
81698831 4384
b9f9108c 4385 if (mask == TRACE_ITER_PRINTK) {
81698831 4386 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
4387 trace_printk_control(enabled);
4388 }
613f04a0
SRRH
4389
4390 return 0;
af4617bd
SR
4391}
4392
2b6080f2 4393static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 4394{
8d18eaaf 4395 char *cmp;
bc0c38d1 4396 int neg = 0;
613f04a0 4397 int ret = -ENODEV;
bc0c38d1 4398 int i;
a4d1e688 4399 size_t orig_len = strlen(option);
bc0c38d1 4400
7bcfaf54 4401 cmp = strstrip(option);
bc0c38d1 4402
8d18eaaf 4403 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
4404 neg = 1;
4405 cmp += 2;
4406 }
4407
69d34da2
SRRH
4408 mutex_lock(&trace_types_lock);
4409
bc0c38d1 4410 for (i = 0; trace_options[i]; i++) {
8d18eaaf 4411 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 4412 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
4413 break;
4414 }
4415 }
adf9f195
FW
4416
4417 /* If no option could be set, test the specific tracer options */
69d34da2 4418 if (!trace_options[i])
8c1a49ae 4419 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
4420
4421 mutex_unlock(&trace_types_lock);
bc0c38d1 4422
a4d1e688
JW
4423 /*
4424 * If the first trailing whitespace is replaced with '\0' by strstrip,
4425 * turn it back into a space.
4426 */
4427 if (orig_len > strlen(option))
4428 option[strlen(option)] = ' ';
4429
7bcfaf54
SR
4430 return ret;
4431}
4432
a4d1e688
JW
4433static void __init apply_trace_boot_options(void)
4434{
4435 char *buf = trace_boot_options_buf;
4436 char *option;
4437
4438 while (true) {
4439 option = strsep(&buf, ",");
4440
4441 if (!option)
4442 break;
a4d1e688 4443
43ed3843
SRRH
4444 if (*option)
4445 trace_set_options(&global_trace, option);
a4d1e688
JW
4446
4447 /* Put back the comma to allow this to be called again */
4448 if (buf)
4449 *(buf - 1) = ',';
4450 }
4451}
4452
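These boot-time options arrive through the trace_options= kernel parameter as one comma-separated string; the loop above splits it with strsep() and hands each token to trace_set_options(), where a leading "no" clears the named flag instead of setting it. An illustrative command-line fragment (the option names are standard trace flags, but treat the exact line as an example only):

/* Example kernel command line fragment (illustrative):
 *
 *     trace_options=print-parent,nosym-offset,overwrite
 */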
7bcfaf54
SR
4453static ssize_t
4454tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4455 size_t cnt, loff_t *ppos)
4456{
2b6080f2
SR
4457 struct seq_file *m = filp->private_data;
4458 struct trace_array *tr = m->private;
7bcfaf54 4459 char buf[64];
613f04a0 4460 int ret;
7bcfaf54
SR
4461
4462 if (cnt >= sizeof(buf))
4463 return -EINVAL;
4464
4afe6495 4465 if (copy_from_user(buf, ubuf, cnt))
7bcfaf54
SR
4466 return -EFAULT;
4467
a8dd2176
SR
4468 buf[cnt] = 0;
4469
2b6080f2 4470 ret = trace_set_options(tr, buf);
613f04a0
SRRH
4471 if (ret < 0)
4472 return ret;
7bcfaf54 4473
cf8517cf 4474 *ppos += cnt;
bc0c38d1
SR
4475
4476 return cnt;
4477}
4478
fdb372ed
LZ
4479static int tracing_trace_options_open(struct inode *inode, struct file *file)
4480{
7b85af63 4481 struct trace_array *tr = inode->i_private;
f77d09a3 4482 int ret;
7b85af63 4483
fdb372ed
LZ
4484 if (tracing_disabled)
4485 return -ENODEV;
2b6080f2 4486
7b85af63
SRRH
4487 if (trace_array_get(tr) < 0)
4488 return -ENODEV;
4489
f77d09a3
AL
4490 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4491 if (ret < 0)
4492 trace_array_put(tr);
4493
4494 return ret;
fdb372ed
LZ
4495}
4496
5e2336a0 4497static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
4498 .open = tracing_trace_options_open,
4499 .read = seq_read,
4500 .llseek = seq_lseek,
7b85af63 4501 .release = tracing_single_release_tr,
ee6bce52 4502 .write = tracing_trace_options_write,
bc0c38d1
SR
4503};
4504
7bd2f24c
IM
4505static const char readme_msg[] =
4506 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
4507 "# echo 0 > tracing_on : quick way to disable tracing\n"
4508 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4509 " Important files:\n"
4510 " trace\t\t\t- The static contents of the buffer\n"
4511 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4512 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4513 " current_tracer\t- function and latency tracers\n"
4514 " available_tracers\t- list of configured tracers for current_tracer\n"
4515 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4516 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
 4517	 "  trace_clock\t\t- change the clock used to order events\n"
4518 " local: Per cpu clock but may not be synced across CPUs\n"
4519 " global: Synced across CPUs but slows tracing down.\n"
4520 " counter: Not a clock, but just an increment\n"
4521 " uptime: Jiffy counter from time of boot\n"
4522 " perf: Same clock that perf events use\n"
4523#ifdef CONFIG_X86_64
4524 " x86-tsc: TSC cycle counter\n"
4525#endif
2c1ea60b
TZ
 4526	 "\n  timestamp_mode\t- view the mode used to timestamp events\n"
4527 " delta: Delta difference against a buffer-wide timestamp\n"
4528 " absolute: Absolute (standalone) timestamp\n"
22f45649 4529	 "\n  trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
fa32e855 4530	 "\n  trace_marker_raw\t\t- Writes into this file are written into the kernel buffer as binary data\n"
22f45649
SRRH
4531 " tracing_cpumask\t- Limit which CPUs to trace\n"
4532 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4533 "\t\t\t Remove sub-buffer with rmdir\n"
4534 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
4535 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4536 "\t\t\t option name\n"
939c7a4f 4537 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
4538#ifdef CONFIG_DYNAMIC_FTRACE
4539 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
4540 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4541 "\t\t\t functions\n"
60f1d5e3 4542 "\t accepts: func_full_name or glob-matching-pattern\n"
71485c45
SRRH
4543 "\t modules: Can select a group via module\n"
4544 "\t Format: :mod:<module-name>\n"
4545 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4546 "\t triggers: a command to perform when function is hit\n"
4547 "\t Format: <function>:<trigger>[:count]\n"
4548 "\t trigger: traceon, traceoff\n"
4549 "\t\t enable_event:<system>:<event>\n"
4550 "\t\t disable_event:<system>:<event>\n"
22f45649 4551#ifdef CONFIG_STACKTRACE
71485c45 4552 "\t\t stacktrace\n"
22f45649
SRRH
4553#endif
4554#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4555 "\t\t snapshot\n"
22f45649 4556#endif
17a280ea
SRRH
4557 "\t\t dump\n"
4558 "\t\t cpudump\n"
71485c45
SRRH
4559 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4560 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4561 "\t The first one will disable tracing every time do_fault is hit\n"
4562 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
 4563	 "\t   The first time do_trap is hit and it disables tracing, the\n"
4564 "\t counter will decrement to 2. If tracing is already disabled,\n"
4565 "\t the counter will not decrement. It only decrements when the\n"
4566 "\t trigger did work\n"
4567 "\t To remove trigger without count:\n"
4568 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4569 "\t To remove trigger with a count:\n"
4570 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 4571 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
4572 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4573 "\t modules: Can select a group via module command :mod:\n"
4574 "\t Does not accept triggers\n"
22f45649
SRRH
4575#endif /* CONFIG_DYNAMIC_FTRACE */
4576#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
4577 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4578 "\t\t (function)\n"
22f45649
SRRH
4579#endif
4580#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4581 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 4582 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
4583 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4584#endif
4585#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
4586 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4587 "\t\t\t snapshot buffer. Read the contents for more\n"
4588 "\t\t\t information\n"
22f45649 4589#endif
991821c8 4590#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
4591 " stack_trace\t\t- Shows the max stack trace when active\n"
4592 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
4593 "\t\t\t Write into this file to reset the max size (trigger a\n"
4594 "\t\t\t new trace)\n"
22f45649 4595#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
4596 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4597 "\t\t\t traces\n"
22f45649 4598#endif
991821c8 4599#endif /* CONFIG_STACK_TRACER */
6b0b7551 4600#ifdef CONFIG_KPROBE_EVENTS
86425625
MH
4601 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4602 "\t\t\t Write into this file to define/undefine new trace events.\n"
4603#endif
6b0b7551 4604#ifdef CONFIG_UPROBE_EVENTS
86425625
MH
4605 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4606 "\t\t\t Write into this file to define/undefine new trace events.\n"
4607#endif
6b0b7551 4608#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
86425625 4609 "\t accepts: event-definitions (one definition per line)\n"
c3ca46ef
MH
4610 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4611 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
86425625 4612 "\t -:[<group>/]<event>\n"
6b0b7551 4613#ifdef CONFIG_KPROBE_EVENTS
86425625 4614 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
35b6f55a 4615	 "\t    place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
86425625 4616#endif
6b0b7551 4617#ifdef CONFIG_UPROBE_EVENTS
86425625
MH
4618 "\t place: <path>:<offset>\n"
4619#endif
4620 "\t args: <name>=fetcharg[:type]\n"
4621 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4622 "\t $stack<index>, $stack, $retval, $comm\n"
4623 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4624 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4625#endif
26f25564
TZ
4626 " events/\t\t- Directory containing all trace event subsystems:\n"
4627 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4628 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
4629 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4630 "\t\t\t events\n"
26f25564 4631 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
4632 " events/<system>/<event>/\t- Directory containing control files for\n"
4633 "\t\t\t <event>:\n"
26f25564
TZ
4634 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4635 " filter\t\t- If set, only events passing filter are traced\n"
4636 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
4637 "\t Format: <trigger>[:count][if <filter>]\n"
4638 "\t trigger: traceon, traceoff\n"
4639 "\t enable_event:<system>:<event>\n"
4640 "\t disable_event:<system>:<event>\n"
d0bad49b
TZ
4641#ifdef CONFIG_HIST_TRIGGERS
4642 "\t enable_hist:<system>:<event>\n"
4643 "\t disable_hist:<system>:<event>\n"
4644#endif
26f25564 4645#ifdef CONFIG_STACKTRACE
71485c45 4646 "\t\t stacktrace\n"
26f25564
TZ
4647#endif
4648#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4649 "\t\t snapshot\n"
7ef224d1
TZ
4650#endif
4651#ifdef CONFIG_HIST_TRIGGERS
4652 "\t\t hist (see below)\n"
26f25564 4653#endif
71485c45
SRRH
4654 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4655 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4656 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4657 "\t events/block/block_unplug/trigger\n"
4658 "\t The first disables tracing every time block_unplug is hit.\n"
4659 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4660 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
 4661	 "\t   is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
4662 "\t Like function triggers, the counter is only decremented if it\n"
4663 "\t enabled or disabled tracing.\n"
4664 "\t To remove a trigger without a count:\n"
4665 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4666 "\t To remove a trigger with a count:\n"
4667 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4668 "\t Filters can be ignored when removing a trigger.\n"
7ef224d1
TZ
4669#ifdef CONFIG_HIST_TRIGGERS
4670 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
76a3b0c8 4671 "\t Format: hist:keys=<field1[,field2,...]>\n"
f2606835 4672 "\t [:values=<field1[,field2,...]>]\n"
e62347d2 4673 "\t [:sort=<field1[,field2,...]>]\n"
7ef224d1 4674 "\t [:size=#entries]\n"
e86ae9ba 4675 "\t [:pause][:continue][:clear]\n"
5463bfda 4676 "\t [:name=histname1]\n"
7ef224d1
TZ
4677 "\t [if <filter>]\n\n"
4678 "\t When a matching event is hit, an entry is added to a hash\n"
f2606835
TZ
4679 "\t table using the key(s) and value(s) named, and the value of a\n"
4680 "\t sum called 'hitcount' is incremented. Keys and values\n"
4681 "\t correspond to fields in the event's format description. Keys\n"
69a0200c
TZ
4682 "\t can be any field, or the special string 'stacktrace'.\n"
4683 "\t Compound keys consisting of up to two fields can be specified\n"
4684 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4685 "\t fields. Sort keys consisting of up to two fields can be\n"
4686 "\t specified using the 'sort' keyword. The sort direction can\n"
4687 "\t be modified by appending '.descending' or '.ascending' to a\n"
4688 "\t sort field. The 'size' parameter can be used to specify more\n"
5463bfda
TZ
4689 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4690 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4691 "\t its histogram data will be shared with other triggers of the\n"
4692 "\t same name, and trigger hits will update this common data.\n\n"
7ef224d1 4693 "\t Reading the 'hist' file for the event will dump the hash\n"
52a7f16d
TZ
4694 "\t table in its entirety to stdout. If there are multiple hist\n"
4695 "\t triggers attached to an event, there will be a table for each\n"
5463bfda
TZ
4696 "\t trigger in the output. The table displayed for a named\n"
4697 "\t trigger will be the same as any other instance having the\n"
4698 "\t same name. The default format used to display a given field\n"
4699 "\t can be modified by appending any of the following modifiers\n"
4700 "\t to the field name, as applicable:\n\n"
c6afad49
TZ
4701 "\t .hex display a number as a hex value\n"
4702 "\t .sym display an address as a symbol\n"
6b4827ad 4703 "\t .sym-offset display an address as a symbol and offset\n"
31696198 4704 "\t .execname display a common_pid as a program name\n"
860f9f6b
TZ
4705 "\t .syscall display a syscall id as a syscall name\n"
4706 "\t .log2 display log2 value rather than raw number\n"
4707 "\t .usecs display a common_timestamp in microseconds\n\n"
83e99914
TZ
4708 "\t The 'pause' parameter can be used to pause an existing hist\n"
4709 "\t trigger or to start a hist trigger but not log any events\n"
4710 "\t until told to do so. 'continue' can be used to start or\n"
4711 "\t restart a paused hist trigger.\n\n"
e86ae9ba
TZ
4712 "\t The 'clear' parameter will clear the contents of a running\n"
4713 "\t hist trigger and leave its current paused/active state\n"
4714 "\t unchanged.\n\n"
d0bad49b
TZ
4715 "\t The enable_hist and disable_hist triggers can be used to\n"
4716 "\t have one event conditionally start and stop another event's\n"
 4717	 "\t    already-attached hist trigger.  The syntax is analogous to\n"
4718 "\t the enable_event and disable_event triggers.\n"
7ef224d1 4719#endif
7bd2f24c
IM
4720;
4721
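As a concrete illustration of the hist-trigger workflow the HOWTO above describes, here is a small userspace sketch. It is not part of trace.c; it assumes CONFIG_HIST_TRIGGERS is enabled and that tracefs is mounted at /sys/kernel/debug/tracing (adjust the path if it is mounted at /sys/kernel/tracing instead).

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *dir = "/sys/kernel/debug/tracing/events/kmem/kmalloc";
	const char *cmd = "hist:keys=call_site.sym:values=bytes_req";
	char path[256], buf[4096];
	ssize_t n;
	int fd;

	/* Attach a hist trigger to the kmem:kmalloc event. */
	snprintf(path, sizeof(path), "%s/trigger", dir);
	fd = open(path, O_WRONLY);
	if (fd < 0 || write(fd, cmd, strlen(cmd)) < 0) {
		perror("hist trigger");
		return 1;
	}
	close(fd);

	sleep(1);	/* let some kmalloc events accumulate */

	/* Dump the aggregated histogram. */
	snprintf(path, sizeof(path), "%s/hist", dir);
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("hist");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}

Removing the trigger afterwards works the same way, by writing the command prefixed with '!' to the same trigger file.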
4722static ssize_t
4723tracing_readme_read(struct file *filp, char __user *ubuf,
4724 size_t cnt, loff_t *ppos)
4725{
4726 return simple_read_from_buffer(ubuf, cnt, ppos,
4727 readme_msg, strlen(readme_msg));
4728}
4729
5e2336a0 4730static const struct file_operations tracing_readme_fops = {
c7078de1
IM
4731 .open = tracing_open_generic,
4732 .read = tracing_readme_read,
b444786f 4733 .llseek = generic_file_llseek,
7bd2f24c
IM
4734};
4735
99c621d7
MS
4736static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4737{
4738 int *ptr = v;
4739
4740 if (*pos || m->count)
4741 ptr++;
4742
4743 (*pos)++;
4744
4745 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4746 if (trace_find_tgid(*ptr))
4747 return ptr;
4748 }
4749
4750 return NULL;
4751}
4752
4753static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4754{
4755 void *v;
4756 loff_t l = 0;
4757
4758 if (!tgid_map)
4759 return NULL;
4760
4761 v = &tgid_map[0];
4762 while (l <= *pos) {
4763 v = saved_tgids_next(m, v, &l);
4764 if (!v)
4765 return NULL;
4766 }
4767
4768 return v;
4769}
4770
4771static void saved_tgids_stop(struct seq_file *m, void *v)
4772{
4773}
4774
4775static int saved_tgids_show(struct seq_file *m, void *v)
4776{
4777 int pid = (int *)v - tgid_map;
4778
4779 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4780 return 0;
4781}
4782
4783static const struct seq_operations tracing_saved_tgids_seq_ops = {
4784 .start = saved_tgids_start,
4785 .stop = saved_tgids_stop,
4786 .next = saved_tgids_next,
4787 .show = saved_tgids_show,
4788};
4789
4790static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4791{
4792 if (tracing_disabled)
4793 return -ENODEV;
4794
4795 return seq_open(filp, &tracing_saved_tgids_seq_ops);
4796}
4797
4798
4799static const struct file_operations tracing_saved_tgids_fops = {
4800 .open = tracing_saved_tgids_open,
4801 .read = seq_read,
4802 .llseek = seq_lseek,
4803 .release = seq_release,
4804};
4805
42584c81
YY
4806static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4807{
4808 unsigned int *ptr = v;
69abe6a5 4809
42584c81
YY
4810 if (*pos || m->count)
4811 ptr++;
69abe6a5 4812
42584c81 4813 (*pos)++;
69abe6a5 4814
939c7a4f
YY
4815 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4816 ptr++) {
42584c81
YY
4817 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4818 continue;
69abe6a5 4819
42584c81
YY
4820 return ptr;
4821 }
69abe6a5 4822
42584c81
YY
4823 return NULL;
4824}
4825
4826static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4827{
4828 void *v;
4829 loff_t l = 0;
69abe6a5 4830
4c27e756
SRRH
4831 preempt_disable();
4832 arch_spin_lock(&trace_cmdline_lock);
4833
939c7a4f 4834 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
4835 while (l <= *pos) {
4836 v = saved_cmdlines_next(m, v, &l);
4837 if (!v)
4838 return NULL;
69abe6a5
AP
4839 }
4840
42584c81
YY
4841 return v;
4842}
4843
4844static void saved_cmdlines_stop(struct seq_file *m, void *v)
4845{
4c27e756
SRRH
4846 arch_spin_unlock(&trace_cmdline_lock);
4847 preempt_enable();
42584c81 4848}
69abe6a5 4849
42584c81
YY
4850static int saved_cmdlines_show(struct seq_file *m, void *v)
4851{
4852 char buf[TASK_COMM_LEN];
4853 unsigned int *pid = v;
69abe6a5 4854
4c27e756 4855 __trace_find_cmdline(*pid, buf);
42584c81
YY
4856 seq_printf(m, "%d %s\n", *pid, buf);
4857 return 0;
4858}
4859
4860static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4861 .start = saved_cmdlines_start,
4862 .next = saved_cmdlines_next,
4863 .stop = saved_cmdlines_stop,
4864 .show = saved_cmdlines_show,
4865};
4866
4867static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4868{
4869 if (tracing_disabled)
4870 return -ENODEV;
4871
4872 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
4873}
4874
4875static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
4876 .open = tracing_saved_cmdlines_open,
4877 .read = seq_read,
4878 .llseek = seq_lseek,
4879 .release = seq_release,
69abe6a5
AP
4880};
4881
939c7a4f
YY
4882static ssize_t
4883tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4884 size_t cnt, loff_t *ppos)
4885{
4886 char buf[64];
4887 int r;
4888
4889 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 4890 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
4891 arch_spin_unlock(&trace_cmdline_lock);
4892
4893 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4894}
4895
4896static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4897{
4898 kfree(s->saved_cmdlines);
4899 kfree(s->map_cmdline_to_pid);
4900 kfree(s);
4901}
4902
4903static int tracing_resize_saved_cmdlines(unsigned int val)
4904{
4905 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4906
a6af8fbf 4907 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
4908 if (!s)
4909 return -ENOMEM;
4910
4911 if (allocate_cmdlines_buffer(val, s) < 0) {
4912 kfree(s);
4913 return -ENOMEM;
4914 }
4915
4916 arch_spin_lock(&trace_cmdline_lock);
4917 savedcmd_temp = savedcmd;
4918 savedcmd = s;
4919 arch_spin_unlock(&trace_cmdline_lock);
4920 free_saved_cmdlines_buffer(savedcmd_temp);
4921
4922 return 0;
4923}
4924
4925static ssize_t
4926tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4927 size_t cnt, loff_t *ppos)
4928{
4929 unsigned long val;
4930 int ret;
4931
4932 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4933 if (ret)
4934 return ret;
4935
 4936	 /* must have at least 1 entry and no more than PID_MAX_DEFAULT */
4937 if (!val || val > PID_MAX_DEFAULT)
4938 return -EINVAL;
4939
4940 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4941 if (ret < 0)
4942 return ret;
4943
4944 *ppos += cnt;
4945
4946 return cnt;
4947}
4948
4949static const struct file_operations tracing_saved_cmdlines_size_fops = {
4950 .open = tracing_open_generic,
4951 .read = tracing_saved_cmdlines_size_read,
4952 .write = tracing_saved_cmdlines_size_write,
4953};
4954
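The resize path above backs writes to the saved_cmdlines_size file; an illustrative way to exercise it from userspace (path and size are examples, assuming the usual tracefs mount point):

/* Illustrative usage:
 *
 *     echo 4096 > /sys/kernel/debug/tracing/saved_cmdlines_size
 *     cat /sys/kernel/debug/tracing/saved_cmdlines_size
 *
 * The value must be at least 1 and at most PID_MAX_DEFAULT, as checked in
 * tracing_saved_cmdlines_size_write() above.
 */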
681bec03 4955#ifdef CONFIG_TRACE_EVAL_MAP_FILE
23bf8cb8 4956static union trace_eval_map_item *
f57a4143 4957update_eval_map(union trace_eval_map_item *ptr)
9828413d 4958{
00f4b652 4959 if (!ptr->map.eval_string) {
9828413d
SRRH
4960 if (ptr->tail.next) {
4961 ptr = ptr->tail.next;
4962 /* Set ptr to the next real item (skip head) */
4963 ptr++;
4964 } else
4965 return NULL;
4966 }
4967 return ptr;
4968}
4969
f57a4143 4970static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
9828413d 4971{
23bf8cb8 4972 union trace_eval_map_item *ptr = v;
9828413d
SRRH
4973
4974 /*
4975 * Paranoid! If ptr points to end, we don't want to increment past it.
4976 * This really should never happen.
4977 */
f57a4143 4978 ptr = update_eval_map(ptr);
9828413d
SRRH
4979 if (WARN_ON_ONCE(!ptr))
4980 return NULL;
4981
4982 ptr++;
4983
4984 (*pos)++;
4985
f57a4143 4986 ptr = update_eval_map(ptr);
9828413d
SRRH
4987
4988 return ptr;
4989}
4990
f57a4143 4991static void *eval_map_start(struct seq_file *m, loff_t *pos)
9828413d 4992{
23bf8cb8 4993 union trace_eval_map_item *v;
9828413d
SRRH
4994 loff_t l = 0;
4995
1793ed93 4996 mutex_lock(&trace_eval_mutex);
9828413d 4997
23bf8cb8 4998 v = trace_eval_maps;
9828413d
SRRH
4999 if (v)
5000 v++;
5001
5002 while (v && l < *pos) {
f57a4143 5003 v = eval_map_next(m, v, &l);
9828413d
SRRH
5004 }
5005
5006 return v;
5007}
5008
f57a4143 5009static void eval_map_stop(struct seq_file *m, void *v)
9828413d 5010{
1793ed93 5011 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
5012}
5013
f57a4143 5014static int eval_map_show(struct seq_file *m, void *v)
9828413d 5015{
23bf8cb8 5016 union trace_eval_map_item *ptr = v;
9828413d
SRRH
5017
5018 seq_printf(m, "%s %ld (%s)\n",
00f4b652 5019 ptr->map.eval_string, ptr->map.eval_value,
9828413d
SRRH
5020 ptr->map.system);
5021
5022 return 0;
5023}
5024
f57a4143
JL
5025static const struct seq_operations tracing_eval_map_seq_ops = {
5026 .start = eval_map_start,
5027 .next = eval_map_next,
5028 .stop = eval_map_stop,
5029 .show = eval_map_show,
9828413d
SRRH
5030};
5031
f57a4143 5032static int tracing_eval_map_open(struct inode *inode, struct file *filp)
9828413d
SRRH
5033{
5034 if (tracing_disabled)
5035 return -ENODEV;
5036
f57a4143 5037 return seq_open(filp, &tracing_eval_map_seq_ops);
9828413d
SRRH
5038}
5039
f57a4143
JL
5040static const struct file_operations tracing_eval_map_fops = {
5041 .open = tracing_eval_map_open,
9828413d
SRRH
5042 .read = seq_read,
5043 .llseek = seq_lseek,
5044 .release = seq_release,
5045};
5046
23bf8cb8 5047static inline union trace_eval_map_item *
5f60b351 5048trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
9828413d
SRRH
5049{
5050 /* Return tail of array given the head */
5051 return ptr + ptr->head.length + 1;
5052}
5053
5054static void
f57a4143 5055trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
9828413d
SRRH
5056 int len)
5057{
00f4b652
JL
5058 struct trace_eval_map **stop;
5059 struct trace_eval_map **map;
23bf8cb8
JL
5060 union trace_eval_map_item *map_array;
5061 union trace_eval_map_item *ptr;
9828413d
SRRH
5062
5063 stop = start + len;
5064
5065 /*
23bf8cb8 5066 * The trace_eval_maps contains the map plus a head and tail item,
9828413d
SRRH
5067 * where the head holds the module and length of array, and the
5068 * tail holds a pointer to the next list.
5069 */
5070 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
5071 if (!map_array) {
f57a4143 5072 pr_warn("Unable to allocate trace eval mapping\n");
9828413d
SRRH
5073 return;
5074 }
5075
1793ed93 5076 mutex_lock(&trace_eval_mutex);
9828413d 5077
23bf8cb8
JL
5078 if (!trace_eval_maps)
5079 trace_eval_maps = map_array;
9828413d 5080 else {
23bf8cb8 5081 ptr = trace_eval_maps;
9828413d 5082 for (;;) {
5f60b351 5083 ptr = trace_eval_jmp_to_tail(ptr);
9828413d
SRRH
5084 if (!ptr->tail.next)
5085 break;
5086 ptr = ptr->tail.next;
5087
5088 }
5089 ptr->tail.next = map_array;
5090 }
5091 map_array->head.mod = mod;
5092 map_array->head.length = len;
5093 map_array++;
5094
5095 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5096 map_array->map = **map;
5097 map_array++;
5098 }
5099 memset(map_array, 0, sizeof(*map_array));
5100
1793ed93 5101 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
5102}
5103
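The head/map/tail arrangement that the comment inside trace_insert_eval_map_file() describes can be pictured as follows (an illustrative note, not kernel text):

/*
 * One chunk appended to trace_eval_maps holds len + 2 items:
 *
 *   [ head: mod, length = len ][ map 0 ][ map 1 ] ... [ map len-1 ][ tail ]
 *
 * trace_eval_jmp_to_tail() jumps from the head item to the tail item
 * (ptr + ptr->head.length + 1); tail.next points at the head of the next
 * module's chunk, or is NULL for the last chunk (hence the final memset).
 */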
f57a4143 5104static void trace_create_eval_file(struct dentry *d_tracer)
9828413d 5105{
681bec03 5106 trace_create_file("eval_map", 0444, d_tracer,
f57a4143 5107 NULL, &tracing_eval_map_fops);
9828413d
SRRH
5108}
5109
681bec03 5110#else /* CONFIG_TRACE_EVAL_MAP_FILE */
f57a4143
JL
5111static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5112static inline void trace_insert_eval_map_file(struct module *mod,
00f4b652 5113 struct trace_eval_map **start, int len) { }
681bec03 5114#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 5115
f57a4143 5116static void trace_insert_eval_map(struct module *mod,
00f4b652 5117 struct trace_eval_map **start, int len)
0c564a53 5118{
00f4b652 5119 struct trace_eval_map **map;
0c564a53
SRRH
5120
5121 if (len <= 0)
5122 return;
5123
5124 map = start;
5125
f57a4143 5126 trace_event_eval_update(map, len);
9828413d 5127
f57a4143 5128 trace_insert_eval_map_file(mod, start, len);
0c564a53
SRRH
5129}
5130
bc0c38d1
SR
5131static ssize_t
5132tracing_set_trace_read(struct file *filp, char __user *ubuf,
5133 size_t cnt, loff_t *ppos)
5134{
2b6080f2 5135 struct trace_array *tr = filp->private_data;
ee6c2c1b 5136 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
5137 int r;
5138
5139 mutex_lock(&trace_types_lock);
2b6080f2 5140 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
5141 mutex_unlock(&trace_types_lock);
5142
4bf39a94 5143 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
5144}
5145
b6f11df2
ACM
5146int tracer_init(struct tracer *t, struct trace_array *tr)
5147{
12883efb 5148 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
5149 return t->init(tr);
5150}
5151
12883efb 5152static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
5153{
5154 int cpu;
737223fb 5155
438ced17 5156 for_each_tracing_cpu(cpu)
12883efb 5157 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
5158}
5159
12883efb 5160#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 5161/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
5162static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5163 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
5164{
5165 int cpu, ret = 0;
5166
5167 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5168 for_each_tracing_cpu(cpu) {
12883efb
SRRH
5169 ret = ring_buffer_resize(trace_buf->buffer,
5170 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
5171 if (ret < 0)
5172 break;
12883efb
SRRH
5173 per_cpu_ptr(trace_buf->data, cpu)->entries =
5174 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
5175 }
5176 } else {
12883efb
SRRH
5177 ret = ring_buffer_resize(trace_buf->buffer,
5178 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 5179 if (ret == 0)
12883efb
SRRH
5180 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5181 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
5182 }
5183
5184 return ret;
5185}
12883efb 5186#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 5187
2b6080f2
SR
5188static int __tracing_resize_ring_buffer(struct trace_array *tr,
5189 unsigned long size, int cpu)
73c5162a
SR
5190{
5191 int ret;
5192
5193 /*
5194 * If kernel or user changes the size of the ring buffer
a123c52b
SR
5195 * we use the size that was given, and we can forget about
5196 * expanding it later.
73c5162a 5197 */
55034cd6 5198 ring_buffer_expanded = true;
73c5162a 5199
b382ede6 5200 /* May be called before buffers are initialized */
12883efb 5201 if (!tr->trace_buffer.buffer)
b382ede6
SR
5202 return 0;
5203
12883efb 5204 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
5205 if (ret < 0)
5206 return ret;
5207
12883efb 5208#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
5209 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5210 !tr->current_trace->use_max_tr)
ef710e10
KM
5211 goto out;
5212
12883efb 5213 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 5214 if (ret < 0) {
12883efb
SRRH
5215 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5216 &tr->trace_buffer, cpu);
73c5162a 5217 if (r < 0) {
a123c52b
SR
5218 /*
5219 * AARGH! We are left with different
5220 * size max buffer!!!!
5221 * The max buffer is our "snapshot" buffer.
5222 * When a tracer needs a snapshot (one of the
5223 * latency tracers), it swaps the max buffer
 5224			 * with the saved snapshot. We managed to
5225 * update the size of the main buffer, but failed to
5226 * update the size of the max buffer. But when we tried
5227 * to reset the main buffer to the original size, we
5228 * failed there too. This is very unlikely to
5229 * happen, but if it does, warn and kill all
5230 * tracing.
5231 */
73c5162a
SR
5232 WARN_ON(1);
5233 tracing_disabled = 1;
5234 }
5235 return ret;
5236 }
5237
438ced17 5238 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5239 set_buffer_entries(&tr->max_buffer, size);
438ced17 5240 else
12883efb 5241 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 5242
ef710e10 5243 out:
12883efb
SRRH
5244#endif /* CONFIG_TRACER_MAX_TRACE */
5245
438ced17 5246 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5247 set_buffer_entries(&tr->trace_buffer, size);
438ced17 5248 else
12883efb 5249 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
5250
5251 return ret;
5252}
5253
2b6080f2
SR
5254static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5255 unsigned long size, int cpu_id)
4f271a2a 5256{
83f40318 5257 int ret = size;
4f271a2a
VN
5258
5259 mutex_lock(&trace_types_lock);
5260
438ced17
VN
5261 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5262 /* make sure, this cpu is enabled in the mask */
5263 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5264 ret = -EINVAL;
5265 goto out;
5266 }
5267 }
4f271a2a 5268
2b6080f2 5269 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
5270 if (ret < 0)
5271 ret = -ENOMEM;
5272
438ced17 5273out:
4f271a2a
VN
5274 mutex_unlock(&trace_types_lock);
5275
5276 return ret;
5277}
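/*
 * Illustrative usage (assuming tracefs is mounted at /sys/kernel/tracing
 * and this resize path is reached through the buffer_size_kb files,
 * which are created elsewhere):
 *
 *   # resize every CPU's buffer to 8 MB
 *   echo 8192 > /sys/kernel/tracing/buffer_size_kb
 *
 *   # resize only CPU 0's buffer
 *   echo 8192 > /sys/kernel/tracing/per_cpu/cpu0/buffer_size_kb
 *
 * Writing to a per-cpu file fails with -EINVAL if that CPU is not in
 * tracing_buffer_mask, matching the check above.
 */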
5278
ef710e10 5279
1852fcce
SR
5280/**
5281 * tracing_update_buffers - used by tracing facility to expand ring buffers
5282 *
 5283 * To save memory when tracing is never used on a system that has it
 5284 * configured in, the ring buffers are set to a minimum size. Once a
 5285 * user starts to use the tracing facility, they need to grow to
 5286 * their default size.
5287 *
5288 * This function is to be called when a tracer is about to be used.
5289 */
5290int tracing_update_buffers(void)
5291{
5292 int ret = 0;
5293
1027fcb2 5294 mutex_lock(&trace_types_lock);
1852fcce 5295 if (!ring_buffer_expanded)
2b6080f2 5296 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 5297 RING_BUFFER_ALL_CPUS);
1027fcb2 5298 mutex_unlock(&trace_types_lock);
1852fcce
SR
5299
5300 return ret;
5301}
5302
577b785f
SR
5303struct trace_option_dentry;
5304
37aea98b 5305static void
2b6080f2 5306create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 5307
6b450d25
SRRH
5308/*
5309 * Used to clear out the tracer before deletion of an instance.
5310 * Must have trace_types_lock held.
5311 */
5312static void tracing_set_nop(struct trace_array *tr)
5313{
5314 if (tr->current_trace == &nop_trace)
5315 return;
5316
50512ab5 5317 tr->current_trace->enabled--;
6b450d25
SRRH
5318
5319 if (tr->current_trace->reset)
5320 tr->current_trace->reset(tr);
5321
5322 tr->current_trace = &nop_trace;
5323}
5324
41d9c0be 5325static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 5326{
09d23a1d
SRRH
5327 /* Only enable if the directory has been created already. */
5328 if (!tr->dir)
5329 return;
5330
37aea98b 5331 create_trace_option_files(tr, t);
09d23a1d
SRRH
5332}
5333
5334static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5335{
bc0c38d1 5336 struct tracer *t;
12883efb 5337#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5338 bool had_max_tr;
12883efb 5339#endif
d9e54076 5340 int ret = 0;
bc0c38d1 5341
1027fcb2
SR
5342 mutex_lock(&trace_types_lock);
5343
73c5162a 5344 if (!ring_buffer_expanded) {
2b6080f2 5345 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 5346 RING_BUFFER_ALL_CPUS);
73c5162a 5347 if (ret < 0)
59f586db 5348 goto out;
73c5162a
SR
5349 ret = 0;
5350 }
5351
bc0c38d1
SR
5352 for (t = trace_types; t; t = t->next) {
5353 if (strcmp(t->name, buf) == 0)
5354 break;
5355 }
c2931e05
FW
5356 if (!t) {
5357 ret = -EINVAL;
5358 goto out;
5359 }
2b6080f2 5360 if (t == tr->current_trace)
bc0c38d1
SR
5361 goto out;
5362
c7b3ae0b
ZSZ
 5363 /* Some tracers won't work when set on the kernel command line */
5364 if (system_state < SYSTEM_RUNNING && t->noboot) {
5365 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5366 t->name);
5367 goto out;
5368 }
5369
607e2ea1
SRRH
5370 /* Some tracers are only allowed for the top level buffer */
5371 if (!trace_ok_for_array(t, tr)) {
5372 ret = -EINVAL;
5373 goto out;
5374 }
5375
cf6ab6d9
SRRH
5376 /* If trace pipe files are being read, we can't change the tracer */
5377 if (tr->current_trace->ref) {
5378 ret = -EBUSY;
5379 goto out;
5380 }
5381
9f029e83 5382 trace_branch_disable();
613f04a0 5383
50512ab5 5384 tr->current_trace->enabled--;
613f04a0 5385
2b6080f2
SR
5386 if (tr->current_trace->reset)
5387 tr->current_trace->reset(tr);
34600f0e 5388
12883efb 5389 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 5390 tr->current_trace = &nop_trace;
34600f0e 5391
45ad21ca
SRRH
5392#ifdef CONFIG_TRACER_MAX_TRACE
5393 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
5394
5395 if (had_max_tr && !t->use_max_tr) {
5396 /*
5397 * We need to make sure that the update_max_tr sees that
5398 * current_trace changed to nop_trace to keep it from
5399 * swapping the buffers after we resize it.
 5400 * The update_max_tr is called with interrupts disabled,
 5401 * so a synchronize_sched() is sufficient.
5402 */
5403 synchronize_sched();
3209cff4 5404 free_snapshot(tr);
ef710e10 5405 }
12883efb 5406#endif
12883efb
SRRH
5407
5408#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5409 if (t->use_max_tr && !had_max_tr) {
3209cff4 5410 ret = alloc_snapshot(tr);
d60da506
HT
5411 if (ret < 0)
5412 goto out;
ef710e10 5413 }
12883efb 5414#endif
577b785f 5415
1c80025a 5416 if (t->init) {
b6f11df2 5417 ret = tracer_init(t, tr);
1c80025a
FW
5418 if (ret)
5419 goto out;
5420 }
bc0c38d1 5421
2b6080f2 5422 tr->current_trace = t;
50512ab5 5423 tr->current_trace->enabled++;
9f029e83 5424 trace_branch_enable(tr);
bc0c38d1
SR
5425 out:
5426 mutex_unlock(&trace_types_lock);
5427
d9e54076
PZ
5428 return ret;
5429}
5430
5431static ssize_t
5432tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5433 size_t cnt, loff_t *ppos)
5434{
607e2ea1 5435 struct trace_array *tr = filp->private_data;
ee6c2c1b 5436 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
5437 int i;
5438 size_t ret;
e6e7a65a
FW
5439 int err;
5440
5441 ret = cnt;
d9e54076 5442
ee6c2c1b
LZ
5443 if (cnt > MAX_TRACER_SIZE)
5444 cnt = MAX_TRACER_SIZE;
d9e54076 5445
4afe6495 5446 if (copy_from_user(buf, ubuf, cnt))
d9e54076
PZ
5447 return -EFAULT;
5448
5449 buf[cnt] = 0;
5450
5451 /* strip ending whitespace. */
5452 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5453 buf[i] = 0;
5454
607e2ea1 5455 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
5456 if (err)
5457 return err;
d9e54076 5458
cf8517cf 5459 *ppos += ret;
bc0c38d1 5460
c2931e05 5461 return ret;
bc0c38d1
SR
5462}
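/*
 * Illustrative usage of the write path above (assuming it backs the
 * current_tracer file and tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo function > /sys/kernel/tracing/current_tracer
 *   echo nop      > /sys/kernel/tracing/current_tracer   # stop tracing
 *
 * The first successful write also expands the ring buffer to its
 * default size via __tracing_resize_ring_buffer() if that has not
 * happened yet.
 */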
5463
5464static ssize_t
6508fa76
SF
5465tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5466 size_t cnt, loff_t *ppos)
bc0c38d1 5467{
bc0c38d1
SR
5468 char buf[64];
5469 int r;
5470
cffae437 5471 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 5472 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
5473 if (r > sizeof(buf))
5474 r = sizeof(buf);
4bf39a94 5475 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
5476}
5477
5478static ssize_t
6508fa76
SF
5479tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5480 size_t cnt, loff_t *ppos)
bc0c38d1 5481{
5e39841c 5482 unsigned long val;
c6caeeb1 5483 int ret;
bc0c38d1 5484
22fe9b54
PH
5485 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5486 if (ret)
c6caeeb1 5487 return ret;
bc0c38d1
SR
5488
5489 *ptr = val * 1000;
5490
5491 return cnt;
5492}
5493
6508fa76
SF
5494static ssize_t
5495tracing_thresh_read(struct file *filp, char __user *ubuf,
5496 size_t cnt, loff_t *ppos)
5497{
5498 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5499}
5500
5501static ssize_t
5502tracing_thresh_write(struct file *filp, const char __user *ubuf,
5503 size_t cnt, loff_t *ppos)
5504{
5505 struct trace_array *tr = filp->private_data;
5506 int ret;
5507
5508 mutex_lock(&trace_types_lock);
5509 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5510 if (ret < 0)
5511 goto out;
5512
5513 if (tr->current_trace->update_thresh) {
5514 ret = tr->current_trace->update_thresh(tr);
5515 if (ret < 0)
5516 goto out;
5517 }
5518
5519 ret = cnt;
5520out:
5521 mutex_unlock(&trace_types_lock);
5522
5523 return ret;
5524}
5525
f971cc9a 5526#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
e428abbb 5527
6508fa76
SF
5528static ssize_t
5529tracing_max_lat_read(struct file *filp, char __user *ubuf,
5530 size_t cnt, loff_t *ppos)
5531{
5532 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5533}
5534
5535static ssize_t
5536tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5537 size_t cnt, loff_t *ppos)
5538{
5539 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5540}
5541
e428abbb
CG
5542#endif
5543
b3806b43
SR
5544static int tracing_open_pipe(struct inode *inode, struct file *filp)
5545{
15544209 5546 struct trace_array *tr = inode->i_private;
b3806b43 5547 struct trace_iterator *iter;
b04cc6b1 5548 int ret = 0;
b3806b43
SR
5549
5550 if (tracing_disabled)
5551 return -ENODEV;
5552
7b85af63
SRRH
5553 if (trace_array_get(tr) < 0)
5554 return -ENODEV;
5555
b04cc6b1
FW
5556 mutex_lock(&trace_types_lock);
5557
b3806b43
SR
5558 /* create a buffer to store the information to pass to userspace */
5559 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
5560 if (!iter) {
5561 ret = -ENOMEM;
f77d09a3 5562 __trace_array_put(tr);
b04cc6b1
FW
5563 goto out;
5564 }
b3806b43 5565
3a161d99 5566 trace_seq_init(&iter->seq);
d716ff71 5567 iter->trace = tr->current_trace;
d7350c3f 5568
4462344e 5569 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 5570 ret = -ENOMEM;
d7350c3f 5571 goto fail;
4462344e
RR
5572 }
5573
a309720c 5574 /* trace pipe does not show start of buffer */
4462344e 5575 cpumask_setall(iter->started);
a309720c 5576
983f938a 5577 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
112f38a7
SR
5578 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5579
8be0709f 5580 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 5581 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
5582 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5583
15544209
ON
5584 iter->tr = tr;
5585 iter->trace_buffer = &tr->trace_buffer;
5586 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 5587 mutex_init(&iter->mutex);
b3806b43
SR
5588 filp->private_data = iter;
5589
107bad8b
SR
5590 if (iter->trace->pipe_open)
5591 iter->trace->pipe_open(iter);
107bad8b 5592
b444786f 5593 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
5594
5595 tr->current_trace->ref++;
b04cc6b1
FW
5596out:
5597 mutex_unlock(&trace_types_lock);
5598 return ret;
d7350c3f
FW
5599
5600fail:
5601 kfree(iter->trace);
5602 kfree(iter);
7b85af63 5603 __trace_array_put(tr);
d7350c3f
FW
5604 mutex_unlock(&trace_types_lock);
5605 return ret;
b3806b43
SR
5606}
5607
5608static int tracing_release_pipe(struct inode *inode, struct file *file)
5609{
5610 struct trace_iterator *iter = file->private_data;
15544209 5611 struct trace_array *tr = inode->i_private;
b3806b43 5612
b04cc6b1
FW
5613 mutex_lock(&trace_types_lock);
5614
cf6ab6d9
SRRH
5615 tr->current_trace->ref--;
5616
29bf4a5e 5617 if (iter->trace->pipe_close)
c521efd1
SR
5618 iter->trace->pipe_close(iter);
5619
b04cc6b1
FW
5620 mutex_unlock(&trace_types_lock);
5621
4462344e 5622 free_cpumask_var(iter->started);
d7350c3f 5623 mutex_destroy(&iter->mutex);
b3806b43 5624 kfree(iter);
b3806b43 5625
7b85af63
SRRH
5626 trace_array_put(tr);
5627
b3806b43
SR
5628 return 0;
5629}
5630
9dd95748 5631static __poll_t
cc60cdc9 5632trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 5633{
983f938a
SRRH
5634 struct trace_array *tr = iter->tr;
5635
15693458
SRRH
 5636 /* Iterators are static; they are either filled or empty */
5637 if (trace_buffer_iter(iter, iter->cpu_file))
a9a08845 5638 return EPOLLIN | EPOLLRDNORM;
2a2cc8f7 5639
983f938a 5640 if (tr->trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
5641 /*
5642 * Always select as readable when in blocking mode
5643 */
a9a08845 5644 return EPOLLIN | EPOLLRDNORM;
15693458 5645 else
12883efb 5646 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 5647 filp, poll_table);
2a2cc8f7 5648}
2a2cc8f7 5649
9dd95748 5650static __poll_t
cc60cdc9
SR
5651tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5652{
5653 struct trace_iterator *iter = filp->private_data;
5654
5655 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
5656}
5657
d716ff71 5658/* Must be called with iter->mutex held. */
ff98781b 5659static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
5660{
5661 struct trace_iterator *iter = filp->private_data;
8b8b3683 5662 int ret;
b3806b43 5663
b3806b43 5664 while (trace_empty(iter)) {
2dc8f095 5665
107bad8b 5666 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 5667 return -EAGAIN;
107bad8b 5668 }
2dc8f095 5669
b3806b43 5670 /*
250bfd3d 5671 * We block until we read something and tracing is disabled.
b3806b43
SR
5672 * We still block if tracing is disabled, but we have never
5673 * read anything. This allows a user to cat this file, and
5674 * then enable tracing. But after we have read something,
5675 * we give an EOF when tracing is again disabled.
5676 *
5677 * iter->pos will be 0 if we haven't read anything.
5678 */
75df6e68 5679 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
b3806b43 5680 break;
f4874261
SRRH
5681
5682 mutex_unlock(&iter->mutex);
5683
e30f53aa 5684 ret = wait_on_pipe(iter, false);
f4874261
SRRH
5685
5686 mutex_lock(&iter->mutex);
5687
8b8b3683
SRRH
5688 if (ret)
5689 return ret;
b3806b43
SR
5690 }
5691
ff98781b
EGM
5692 return 1;
5693}
5694
5695/*
5696 * Consumer reader.
5697 */
5698static ssize_t
5699tracing_read_pipe(struct file *filp, char __user *ubuf,
5700 size_t cnt, loff_t *ppos)
5701{
5702 struct trace_iterator *iter = filp->private_data;
5703 ssize_t sret;
5704
d7350c3f
FW
5705 /*
5706 * Avoid more than one consumer on a single file descriptor
 5707 * This is just a matter of trace coherency; the ring buffer itself
5708 * is protected.
5709 */
5710 mutex_lock(&iter->mutex);
1245800c
SRRH
5711
5712 /* return any leftover data */
5713 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5714 if (sret != -EBUSY)
5715 goto out;
5716
5717 trace_seq_init(&iter->seq);
5718
ff98781b
EGM
5719 if (iter->trace->read) {
5720 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5721 if (sret)
5722 goto out;
5723 }
5724
5725waitagain:
5726 sret = tracing_wait_pipe(filp);
5727 if (sret <= 0)
5728 goto out;
5729
b3806b43 5730 /* stop when tracing is finished */
ff98781b
EGM
5731 if (trace_empty(iter)) {
5732 sret = 0;
107bad8b 5733 goto out;
ff98781b 5734 }
b3806b43
SR
5735
5736 if (cnt >= PAGE_SIZE)
5737 cnt = PAGE_SIZE - 1;
5738
53d0aa77 5739 /* reset all but tr, trace, and overruns */
53d0aa77
SR
5740 memset(&iter->seq, 0,
5741 sizeof(struct trace_iterator) -
5742 offsetof(struct trace_iterator, seq));
ed5467da 5743 cpumask_clear(iter->started);
4823ed7e 5744 iter->pos = -1;
b3806b43 5745
4f535968 5746 trace_event_read_lock();
7e53bd42 5747 trace_access_lock(iter->cpu_file);
955b61e5 5748 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 5749 enum print_line_t ret;
5ac48378 5750 int save_len = iter->seq.seq.len;
088b1e42 5751
f9896bf3 5752 ret = print_trace_line(iter);
2c4f035f 5753 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 5754 /* don't print partial lines */
5ac48378 5755 iter->seq.seq.len = save_len;
b3806b43 5756 break;
088b1e42 5757 }
b91facc3
FW
5758 if (ret != TRACE_TYPE_NO_CONSUME)
5759 trace_consume(iter);
b3806b43 5760
5ac48378 5761 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 5762 break;
ee5e51f5
JO
5763
5764 /*
5765 * Setting the full flag means we reached the trace_seq buffer
 5766 * size and we should have left via the partial-output condition above.
5767 * One of the trace_seq_* functions is not used properly.
5768 */
5769 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5770 iter->ent->type);
b3806b43 5771 }
7e53bd42 5772 trace_access_unlock(iter->cpu_file);
4f535968 5773 trace_event_read_unlock();
b3806b43 5774
b3806b43 5775 /* Now copy what we have to the user */
6c6c2796 5776 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 5777 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 5778 trace_seq_init(&iter->seq);
9ff4b974
PP
5779
5780 /*
25985edc 5781 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
5782 * entries, go back to wait for more entries.
5783 */
6c6c2796 5784 if (sret == -EBUSY)
9ff4b974 5785 goto waitagain;
b3806b43 5786
107bad8b 5787out:
d7350c3f 5788 mutex_unlock(&iter->mutex);
107bad8b 5789
6c6c2796 5790 return sret;
b3806b43
SR
5791}
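/*
 * Illustrative behaviour of the consuming reader above (assuming it
 * backs the trace_pipe file):
 *
 *   cat /sys/kernel/tracing/trace_pipe
 *
 * blocks until entries arrive (unless the file was opened O_NONBLOCK),
 * and every entry handed to user space is consumed from the ring
 * buffer, so a second reader will not see it again.  While any
 * trace_pipe reader is open, the current tracer cannot be changed
 * (tr->current_trace->ref is held).
 */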
5792
3c56819b
EGM
5793static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5794 unsigned int idx)
5795{
5796 __free_page(spd->pages[idx]);
5797}
5798
28dfef8f 5799static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 5800 .can_merge = 0,
34cd4998 5801 .confirm = generic_pipe_buf_confirm,
92fdd98c 5802 .release = generic_pipe_buf_release,
34cd4998
SR
5803 .steal = generic_pipe_buf_steal,
5804 .get = generic_pipe_buf_get,
3c56819b
EGM
5805};
5806
34cd4998 5807static size_t
fa7c7f6e 5808tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
5809{
5810 size_t count;
74f06bb7 5811 int save_len;
34cd4998
SR
5812 int ret;
5813
5814 /* Seq buffer is page-sized, exactly what we need. */
5815 for (;;) {
74f06bb7 5816 save_len = iter->seq.seq.len;
34cd4998 5817 ret = print_trace_line(iter);
74f06bb7
SRRH
5818
5819 if (trace_seq_has_overflowed(&iter->seq)) {
5820 iter->seq.seq.len = save_len;
34cd4998
SR
5821 break;
5822 }
74f06bb7
SRRH
5823
5824 /*
 5825 * This should not be hit, because TRACE_TYPE_PARTIAL_LINE should
 5826 * only be returned if iter->seq overflowed. But check it
5827 * anyway to be safe.
5828 */
34cd4998 5829 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
5830 iter->seq.seq.len = save_len;
5831 break;
5832 }
5833
5ac48378 5834 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
5835 if (rem < count) {
5836 rem = 0;
5837 iter->seq.seq.len = save_len;
34cd4998
SR
5838 break;
5839 }
5840
74e7ff8c
LJ
5841 if (ret != TRACE_TYPE_NO_CONSUME)
5842 trace_consume(iter);
34cd4998 5843 rem -= count;
955b61e5 5844 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
5845 rem = 0;
5846 iter->ent = NULL;
5847 break;
5848 }
5849 }
5850
5851 return rem;
5852}
5853
3c56819b
EGM
5854static ssize_t tracing_splice_read_pipe(struct file *filp,
5855 loff_t *ppos,
5856 struct pipe_inode_info *pipe,
5857 size_t len,
5858 unsigned int flags)
5859{
35f3d14d
JA
5860 struct page *pages_def[PIPE_DEF_BUFFERS];
5861 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
5862 struct trace_iterator *iter = filp->private_data;
5863 struct splice_pipe_desc spd = {
35f3d14d
JA
5864 .pages = pages_def,
5865 .partial = partial_def,
34cd4998 5866 .nr_pages = 0, /* This gets updated below. */
047fe360 5867 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
5868 .ops = &tracing_pipe_buf_ops,
5869 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
5870 };
5871 ssize_t ret;
34cd4998 5872 size_t rem;
3c56819b
EGM
5873 unsigned int i;
5874
35f3d14d
JA
5875 if (splice_grow_spd(pipe, &spd))
5876 return -ENOMEM;
5877
d7350c3f 5878 mutex_lock(&iter->mutex);
3c56819b
EGM
5879
5880 if (iter->trace->splice_read) {
5881 ret = iter->trace->splice_read(iter, filp,
5882 ppos, pipe, len, flags);
5883 if (ret)
34cd4998 5884 goto out_err;
3c56819b
EGM
5885 }
5886
5887 ret = tracing_wait_pipe(filp);
5888 if (ret <= 0)
34cd4998 5889 goto out_err;
3c56819b 5890
955b61e5 5891 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 5892 ret = -EFAULT;
34cd4998 5893 goto out_err;
3c56819b
EGM
5894 }
5895
4f535968 5896 trace_event_read_lock();
7e53bd42 5897 trace_access_lock(iter->cpu_file);
4f535968 5898
3c56819b 5899 /* Fill as many pages as possible. */
a786c06d 5900 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
5901 spd.pages[i] = alloc_page(GFP_KERNEL);
5902 if (!spd.pages[i])
34cd4998 5903 break;
3c56819b 5904
fa7c7f6e 5905 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
5906
5907 /* Copy the data into the page, so we can start over. */
5908 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 5909 page_address(spd.pages[i]),
5ac48378 5910 trace_seq_used(&iter->seq));
3c56819b 5911 if (ret < 0) {
35f3d14d 5912 __free_page(spd.pages[i]);
3c56819b
EGM
5913 break;
5914 }
35f3d14d 5915 spd.partial[i].offset = 0;
5ac48378 5916 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 5917
f9520750 5918 trace_seq_init(&iter->seq);
3c56819b
EGM
5919 }
5920
7e53bd42 5921 trace_access_unlock(iter->cpu_file);
4f535968 5922 trace_event_read_unlock();
d7350c3f 5923 mutex_unlock(&iter->mutex);
3c56819b
EGM
5924
5925 spd.nr_pages = i;
5926
a29054d9
SRRH
5927 if (i)
5928 ret = splice_to_pipe(pipe, &spd);
5929 else
5930 ret = 0;
35f3d14d 5931out:
047fe360 5932 splice_shrink_spd(&spd);
35f3d14d 5933 return ret;
3c56819b 5934
34cd4998 5935out_err:
d7350c3f 5936 mutex_unlock(&iter->mutex);
35f3d14d 5937 goto out;
3c56819b
EGM
5938}
5939
a98a3c3f
SR
5940static ssize_t
5941tracing_entries_read(struct file *filp, char __user *ubuf,
5942 size_t cnt, loff_t *ppos)
5943{
0bc392ee
ON
5944 struct inode *inode = file_inode(filp);
5945 struct trace_array *tr = inode->i_private;
5946 int cpu = tracing_get_cpu(inode);
438ced17
VN
5947 char buf[64];
5948 int r = 0;
5949 ssize_t ret;
a98a3c3f 5950
db526ca3 5951 mutex_lock(&trace_types_lock);
438ced17 5952
0bc392ee 5953 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
5954 int cpu, buf_size_same;
5955 unsigned long size;
5956
5957 size = 0;
5958 buf_size_same = 1;
5959 /* check if all cpu sizes are same */
5960 for_each_tracing_cpu(cpu) {
5961 /* fill in the size from first enabled cpu */
5962 if (size == 0)
12883efb
SRRH
5963 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5964 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
5965 buf_size_same = 0;
5966 break;
5967 }
5968 }
5969
5970 if (buf_size_same) {
5971 if (!ring_buffer_expanded)
5972 r = sprintf(buf, "%lu (expanded: %lu)\n",
5973 size >> 10,
5974 trace_buf_size >> 10);
5975 else
5976 r = sprintf(buf, "%lu\n", size >> 10);
5977 } else
5978 r = sprintf(buf, "X\n");
5979 } else
0bc392ee 5980 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 5981
db526ca3
SR
5982 mutex_unlock(&trace_types_lock);
5983
438ced17
VN
5984 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5985 return ret;
a98a3c3f
SR
5986}
5987
5988static ssize_t
5989tracing_entries_write(struct file *filp, const char __user *ubuf,
5990 size_t cnt, loff_t *ppos)
5991{
0bc392ee
ON
5992 struct inode *inode = file_inode(filp);
5993 struct trace_array *tr = inode->i_private;
a98a3c3f 5994 unsigned long val;
4f271a2a 5995 int ret;
a98a3c3f 5996
22fe9b54
PH
5997 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5998 if (ret)
c6caeeb1 5999 return ret;
a98a3c3f
SR
6000
6001 /* must have at least 1 entry */
6002 if (!val)
6003 return -EINVAL;
6004
1696b2b0
SR
6005 /* value is in KB */
6006 val <<= 10;
0bc392ee 6007 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
6008 if (ret < 0)
6009 return ret;
a98a3c3f 6010
cf8517cf 6011 *ppos += cnt;
a98a3c3f 6012
4f271a2a
VN
6013 return cnt;
6014}
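/*
 * Illustrative reads of the entries interface above (values are
 * examples only; the file is typically exposed as buffer_size_kb):
 *
 *   cat buffer_size_kb   ->  "1408"
 *   cat buffer_size_kb   ->  "<kb> (expanded: 1408)"  before the ring
 *                             buffer has been expanded
 *   cat buffer_size_kb   ->  "X"  when per-cpu sizes differ
 *
 * Writes are interpreted in kilobytes per CPU and must be non-zero.
 */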
bf5e6519 6015
f81ab074
VN
6016static ssize_t
6017tracing_total_entries_read(struct file *filp, char __user *ubuf,
6018 size_t cnt, loff_t *ppos)
6019{
6020 struct trace_array *tr = filp->private_data;
6021 char buf[64];
6022 int r, cpu;
6023 unsigned long size = 0, expanded_size = 0;
6024
6025 mutex_lock(&trace_types_lock);
6026 for_each_tracing_cpu(cpu) {
12883efb 6027 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
6028 if (!ring_buffer_expanded)
6029 expanded_size += trace_buf_size >> 10;
6030 }
6031 if (ring_buffer_expanded)
6032 r = sprintf(buf, "%lu\n", size);
6033 else
6034 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6035 mutex_unlock(&trace_types_lock);
6036
6037 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6038}
6039
4f271a2a
VN
6040static ssize_t
6041tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6042 size_t cnt, loff_t *ppos)
6043{
6044 /*
 6045 * There is no need to read what the user has written; this function
 6046 * exists just to make sure that there is no error when "echo" is used.
6047 */
6048
6049 *ppos += cnt;
a98a3c3f
SR
6050
6051 return cnt;
6052}
6053
4f271a2a
VN
6054static int
6055tracing_free_buffer_release(struct inode *inode, struct file *filp)
6056{
2b6080f2
SR
6057 struct trace_array *tr = inode->i_private;
6058
cf30cf67 6059 /* disable tracing ? */
983f938a 6060 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 6061 tracer_tracing_off(tr);
4f271a2a 6062 /* resize the ring buffer to 0 */
2b6080f2 6063 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 6064
7b85af63
SRRH
6065 trace_array_put(tr);
6066
4f271a2a
VN
6067 return 0;
6068}
6069
5bf9a1ee
PP
6070static ssize_t
6071tracing_mark_write(struct file *filp, const char __user *ubuf,
6072 size_t cnt, loff_t *fpos)
6073{
2d71619c 6074 struct trace_array *tr = filp->private_data;
d696b58c
SR
6075 struct ring_buffer_event *event;
6076 struct ring_buffer *buffer;
6077 struct print_entry *entry;
6078 unsigned long irq_flags;
656c7f0d 6079 const char faulted[] = "<faulted>";
d696b58c 6080 ssize_t written;
d696b58c
SR
6081 int size;
6082 int len;
fa32e855 6083
656c7f0d
SRRH
6084/* Used in tracing_mark_raw_write() as well */
6085#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
5bf9a1ee 6086
c76f0694 6087 if (tracing_disabled)
5bf9a1ee
PP
6088 return -EINVAL;
6089
983f938a 6090 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5224c3a3
MSB
6091 return -EINVAL;
6092
5bf9a1ee
PP
6093 if (cnt > TRACE_BUF_SIZE)
6094 cnt = TRACE_BUF_SIZE;
6095
d696b58c 6096 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 6097
d696b58c 6098 local_save_flags(irq_flags);
656c7f0d 6099 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
d696b58c 6100
656c7f0d
SRRH
 6101 /* If the write is shorter than "<faulted>", make sure we can still store that string */
6102 if (cnt < FAULTED_SIZE)
6103 size += FAULTED_SIZE - cnt;
d696b58c 6104
2d71619c 6105 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
6106 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6107 irq_flags, preempt_count());
656c7f0d 6108 if (unlikely(!event))
d696b58c 6109 /* Ring buffer disabled, return as if not open for write */
656c7f0d 6110 return -EBADF;
d696b58c
SR
6111
6112 entry = ring_buffer_event_data(event);
6113 entry->ip = _THIS_IP_;
6114
656c7f0d
SRRH
6115 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6116 if (len) {
6117 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6118 cnt = FAULTED_SIZE;
6119 written = -EFAULT;
c13d2f7c 6120 } else
656c7f0d
SRRH
6121 written = cnt;
6122 len = cnt;
5bf9a1ee 6123
d696b58c
SR
6124 if (entry->buf[cnt - 1] != '\n') {
6125 entry->buf[cnt] = '\n';
6126 entry->buf[cnt + 1] = '\0';
6127 } else
6128 entry->buf[cnt] = '\0';
6129
7ffbd48d 6130 __buffer_unlock_commit(buffer, event);
5bf9a1ee 6131
656c7f0d
SRRH
6132 if (written > 0)
6133 *fpos += written;
5bf9a1ee 6134
fa32e855
SR
6135 return written;
6136}
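/*
 * Illustrative usage of the marker write above (assuming it backs the
 * trace_marker file):
 *
 *   echo "hello from user space" > /sys/kernel/tracing/trace_marker
 *
 * injects a TRACE_PRINT event into the ring buffer; if the user page
 * faults during the atomic copy, the string "<faulted>" is recorded
 * instead and the write returns -EFAULT, as implemented above.
 */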
6137
6138/* Limit it for now to 3K (including tag) */
6139#define RAW_DATA_MAX_SIZE (1024*3)
6140
6141static ssize_t
6142tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6143 size_t cnt, loff_t *fpos)
6144{
6145 struct trace_array *tr = filp->private_data;
6146 struct ring_buffer_event *event;
6147 struct ring_buffer *buffer;
6148 struct raw_data_entry *entry;
656c7f0d 6149 const char faulted[] = "<faulted>";
fa32e855 6150 unsigned long irq_flags;
fa32e855 6151 ssize_t written;
fa32e855
SR
6152 int size;
6153 int len;
6154
656c7f0d
SRRH
6155#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6156
fa32e855
SR
6157 if (tracing_disabled)
6158 return -EINVAL;
6159
6160 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6161 return -EINVAL;
6162
6163 /* The marker must at least have a tag id */
6164 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6165 return -EINVAL;
6166
6167 if (cnt > TRACE_BUF_SIZE)
6168 cnt = TRACE_BUF_SIZE;
6169
6170 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6171
fa32e855
SR
6172 local_save_flags(irq_flags);
6173 size = sizeof(*entry) + cnt;
656c7f0d
SRRH
6174 if (cnt < FAULT_SIZE_ID)
6175 size += FAULT_SIZE_ID - cnt;
6176
fa32e855 6177 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
6178 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6179 irq_flags, preempt_count());
656c7f0d 6180 if (!event)
fa32e855 6181 /* Ring buffer disabled, return as if not open for write */
656c7f0d 6182 return -EBADF;
fa32e855
SR
6183
6184 entry = ring_buffer_event_data(event);
6185
656c7f0d
SRRH
6186 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6187 if (len) {
6188 entry->id = -1;
6189 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6190 written = -EFAULT;
fa32e855 6191 } else
656c7f0d 6192 written = cnt;
fa32e855
SR
6193
6194 __buffer_unlock_commit(buffer, event);
6195
656c7f0d
SRRH
6196 if (written > 0)
6197 *fpos += written;
1aa54bca
MS
6198
6199 return written;
5bf9a1ee
PP
6200}
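/*
 * Illustrative usage of the raw marker above (assuming it backs the
 * trace_marker_raw file; the record layout below is only an example):
 *
 *   struct { unsigned int id; char payload[8]; } rec = { 42, "rawdata" };
 *   int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *   write(fd, &rec, sizeof(rec));
 *
 * The buffer must start with an integer tag id and is capped at
 * RAW_DATA_MAX_SIZE (3K including the tag), per the checks above.
 */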
6201
13f16d20 6202static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 6203{
2b6080f2 6204 struct trace_array *tr = m->private;
5079f326
Z
6205 int i;
6206
6207 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 6208 seq_printf(m,
5079f326 6209 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
6210 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6211 i == tr->clock_id ? "]" : "");
13f16d20 6212 seq_putc(m, '\n');
5079f326 6213
13f16d20 6214 return 0;
5079f326
Z
6215}
6216
e1e232ca 6217static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 6218{
5079f326
Z
6219 int i;
6220
5079f326
Z
6221 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6222 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6223 break;
6224 }
6225 if (i == ARRAY_SIZE(trace_clocks))
6226 return -EINVAL;
6227
5079f326
Z
6228 mutex_lock(&trace_types_lock);
6229
2b6080f2
SR
6230 tr->clock_id = i;
6231
12883efb 6232 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 6233
60303ed3
DS
6234 /*
6235 * New clock may not be consistent with the previous clock.
6236 * Reset the buffer so that it doesn't have incomparable timestamps.
6237 */
9457158b 6238 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
6239
6240#ifdef CONFIG_TRACER_MAX_TRACE
170b3b10 6241 if (tr->max_buffer.buffer)
12883efb 6242 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 6243 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 6244#endif
60303ed3 6245
5079f326
Z
6246 mutex_unlock(&trace_types_lock);
6247
e1e232ca
SR
6248 return 0;
6249}
6250
6251static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6252 size_t cnt, loff_t *fpos)
6253{
6254 struct seq_file *m = filp->private_data;
6255 struct trace_array *tr = m->private;
6256 char buf[64];
6257 const char *clockstr;
6258 int ret;
6259
6260 if (cnt >= sizeof(buf))
6261 return -EINVAL;
6262
4afe6495 6263 if (copy_from_user(buf, ubuf, cnt))
e1e232ca
SR
6264 return -EFAULT;
6265
6266 buf[cnt] = 0;
6267
6268 clockstr = strstrip(buf);
6269
6270 ret = tracing_set_clock(tr, clockstr);
6271 if (ret)
6272 return ret;
6273
5079f326
Z
6274 *fpos += cnt;
6275
6276 return cnt;
6277}
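/*
 * Illustrative usage of the clock interface above (assuming it backs
 * the trace_clock file):
 *
 *   cat trace_clock        # current clock is shown in brackets,
 *                          # e.g. "[local] global counter ..."
 *   echo global > trace_clock
 *
 * Switching clocks resets both the main and (if allocated) the max
 * ring buffer, since timestamps from different clocks are not
 * comparable, as noted in tracing_set_clock() above.
 */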
6278
13f16d20
LZ
6279static int tracing_clock_open(struct inode *inode, struct file *file)
6280{
7b85af63
SRRH
6281 struct trace_array *tr = inode->i_private;
6282 int ret;
6283
13f16d20
LZ
6284 if (tracing_disabled)
6285 return -ENODEV;
2b6080f2 6286
7b85af63
SRRH
6287 if (trace_array_get(tr))
6288 return -ENODEV;
6289
6290 ret = single_open(file, tracing_clock_show, inode->i_private);
6291 if (ret < 0)
6292 trace_array_put(tr);
6293
6294 return ret;
13f16d20
LZ
6295}
6296
2c1ea60b
TZ
6297static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6298{
6299 struct trace_array *tr = m->private;
6300
6301 mutex_lock(&trace_types_lock);
6302
6303 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6304 seq_puts(m, "delta [absolute]\n");
6305 else
6306 seq_puts(m, "[delta] absolute\n");
6307
6308 mutex_unlock(&trace_types_lock);
6309
6310 return 0;
6311}
6312
6313static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6314{
6315 struct trace_array *tr = inode->i_private;
6316 int ret;
6317
6318 if (tracing_disabled)
6319 return -ENODEV;
6320
6321 if (trace_array_get(tr))
6322 return -ENODEV;
6323
6324 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6325 if (ret < 0)
6326 trace_array_put(tr);
6327
6328 return ret;
6329}
6330
00b41452
TZ
6331int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6332{
6333 int ret = 0;
6334
6335 mutex_lock(&trace_types_lock);
6336
6337 if (abs && tr->time_stamp_abs_ref++)
6338 goto out;
6339
6340 if (!abs) {
6341 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6342 ret = -EINVAL;
6343 goto out;
6344 }
6345
6346 if (--tr->time_stamp_abs_ref)
6347 goto out;
6348 }
6349
6350 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6351
6352#ifdef CONFIG_TRACER_MAX_TRACE
6353 if (tr->max_buffer.buffer)
6354 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6355#endif
6356 out:
6357 mutex_unlock(&trace_types_lock);
6358
6359 return ret;
6360}
6361
6de58e62
SRRH
6362struct ftrace_buffer_info {
6363 struct trace_iterator iter;
6364 void *spare;
73a757e6 6365 unsigned int spare_cpu;
6de58e62
SRRH
6366 unsigned int read;
6367};
6368
debdd57f
HT
6369#ifdef CONFIG_TRACER_SNAPSHOT
6370static int tracing_snapshot_open(struct inode *inode, struct file *file)
6371{
6484c71c 6372 struct trace_array *tr = inode->i_private;
debdd57f 6373 struct trace_iterator *iter;
2b6080f2 6374 struct seq_file *m;
debdd57f
HT
6375 int ret = 0;
6376
ff451961
SRRH
6377 if (trace_array_get(tr) < 0)
6378 return -ENODEV;
6379
debdd57f 6380 if (file->f_mode & FMODE_READ) {
6484c71c 6381 iter = __tracing_open(inode, file, true);
debdd57f
HT
6382 if (IS_ERR(iter))
6383 ret = PTR_ERR(iter);
2b6080f2
SR
6384 } else {
6385 /* Writes still need the seq_file to hold the private data */
f77d09a3 6386 ret = -ENOMEM;
2b6080f2
SR
6387 m = kzalloc(sizeof(*m), GFP_KERNEL);
6388 if (!m)
f77d09a3 6389 goto out;
2b6080f2
SR
6390 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6391 if (!iter) {
6392 kfree(m);
f77d09a3 6393 goto out;
2b6080f2 6394 }
f77d09a3
AL
6395 ret = 0;
6396
ff451961 6397 iter->tr = tr;
6484c71c
ON
6398 iter->trace_buffer = &tr->max_buffer;
6399 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
6400 m->private = iter;
6401 file->private_data = m;
debdd57f 6402 }
f77d09a3 6403out:
ff451961
SRRH
6404 if (ret < 0)
6405 trace_array_put(tr);
6406
debdd57f
HT
6407 return ret;
6408}
6409
6410static ssize_t
6411tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6412 loff_t *ppos)
6413{
2b6080f2
SR
6414 struct seq_file *m = filp->private_data;
6415 struct trace_iterator *iter = m->private;
6416 struct trace_array *tr = iter->tr;
debdd57f
HT
6417 unsigned long val;
6418 int ret;
6419
6420 ret = tracing_update_buffers();
6421 if (ret < 0)
6422 return ret;
6423
6424 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6425 if (ret)
6426 return ret;
6427
6428 mutex_lock(&trace_types_lock);
6429
2b6080f2 6430 if (tr->current_trace->use_max_tr) {
debdd57f
HT
6431 ret = -EBUSY;
6432 goto out;
6433 }
6434
6435 switch (val) {
6436 case 0:
f1affcaa
SRRH
6437 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6438 ret = -EINVAL;
6439 break;
debdd57f 6440 }
3209cff4
SRRH
6441 if (tr->allocated_snapshot)
6442 free_snapshot(tr);
debdd57f
HT
6443 break;
6444 case 1:
f1affcaa
SRRH
6445/* Only allow per-cpu swap if the ring buffer supports it */
6446#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6447 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6448 ret = -EINVAL;
6449 break;
6450 }
6451#endif
45ad21ca 6452 if (!tr->allocated_snapshot) {
3209cff4 6453 ret = alloc_snapshot(tr);
debdd57f
HT
6454 if (ret < 0)
6455 break;
debdd57f 6456 }
debdd57f
HT
6457 local_irq_disable();
6458 /* Now, we're going to swap */
f1affcaa 6459 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 6460 update_max_tr(tr, current, smp_processor_id());
f1affcaa 6461 else
ce9bae55 6462 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
6463 local_irq_enable();
6464 break;
6465 default:
45ad21ca 6466 if (tr->allocated_snapshot) {
f1affcaa
SRRH
6467 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6468 tracing_reset_online_cpus(&tr->max_buffer);
6469 else
6470 tracing_reset(&tr->max_buffer, iter->cpu_file);
6471 }
debdd57f
HT
6472 break;
6473 }
6474
6475 if (ret >= 0) {
6476 *ppos += cnt;
6477 ret = cnt;
6478 }
6479out:
6480 mutex_unlock(&trace_types_lock);
6481 return ret;
6482}
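/*
 * Illustrative semantics of writes to the snapshot file, matching the
 * switch statement above (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *   echo 0 > snapshot   # free the snapshot buffer (whole-buffer file only)
 *   echo 1 > snapshot   # allocate if needed, then swap in a snapshot
 *   echo 2 > snapshot   # any other value: erase the snapshot contents
 *   cat snapshot        # read the snapshotted trace
 *
 * Per-cpu snapshot files can only swap when the ring buffer supports
 * it (CONFIG_RING_BUFFER_ALLOW_SWAP).
 */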
2b6080f2
SR
6483
6484static int tracing_snapshot_release(struct inode *inode, struct file *file)
6485{
6486 struct seq_file *m = file->private_data;
ff451961
SRRH
6487 int ret;
6488
6489 ret = tracing_release(inode, file);
2b6080f2
SR
6490
6491 if (file->f_mode & FMODE_READ)
ff451961 6492 return ret;
2b6080f2
SR
6493
6494 /* If write only, the seq_file is just a stub */
6495 if (m)
6496 kfree(m->private);
6497 kfree(m);
6498
6499 return 0;
6500}
6501
6de58e62
SRRH
6502static int tracing_buffers_open(struct inode *inode, struct file *filp);
6503static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6504 size_t count, loff_t *ppos);
6505static int tracing_buffers_release(struct inode *inode, struct file *file);
6506static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6507 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6508
6509static int snapshot_raw_open(struct inode *inode, struct file *filp)
6510{
6511 struct ftrace_buffer_info *info;
6512 int ret;
6513
6514 ret = tracing_buffers_open(inode, filp);
6515 if (ret < 0)
6516 return ret;
6517
6518 info = filp->private_data;
6519
6520 if (info->iter.trace->use_max_tr) {
6521 tracing_buffers_release(inode, filp);
6522 return -EBUSY;
6523 }
6524
6525 info->iter.snapshot = true;
6526 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6527
6528 return ret;
6529}
6530
debdd57f
HT
6531#endif /* CONFIG_TRACER_SNAPSHOT */
6532
6533
6508fa76
SF
6534static const struct file_operations tracing_thresh_fops = {
6535 .open = tracing_open_generic,
6536 .read = tracing_thresh_read,
6537 .write = tracing_thresh_write,
6538 .llseek = generic_file_llseek,
6539};
6540
f971cc9a 6541#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5e2336a0 6542static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
6543 .open = tracing_open_generic,
6544 .read = tracing_max_lat_read,
6545 .write = tracing_max_lat_write,
b444786f 6546 .llseek = generic_file_llseek,
bc0c38d1 6547};
e428abbb 6548#endif
bc0c38d1 6549
5e2336a0 6550static const struct file_operations set_tracer_fops = {
4bf39a94
IM
6551 .open = tracing_open_generic,
6552 .read = tracing_set_trace_read,
6553 .write = tracing_set_trace_write,
b444786f 6554 .llseek = generic_file_llseek,
bc0c38d1
SR
6555};
6556
5e2336a0 6557static const struct file_operations tracing_pipe_fops = {
4bf39a94 6558 .open = tracing_open_pipe,
2a2cc8f7 6559 .poll = tracing_poll_pipe,
4bf39a94 6560 .read = tracing_read_pipe,
3c56819b 6561 .splice_read = tracing_splice_read_pipe,
4bf39a94 6562 .release = tracing_release_pipe,
b444786f 6563 .llseek = no_llseek,
b3806b43
SR
6564};
6565
5e2336a0 6566static const struct file_operations tracing_entries_fops = {
0bc392ee 6567 .open = tracing_open_generic_tr,
a98a3c3f
SR
6568 .read = tracing_entries_read,
6569 .write = tracing_entries_write,
b444786f 6570 .llseek = generic_file_llseek,
0bc392ee 6571 .release = tracing_release_generic_tr,
a98a3c3f
SR
6572};
6573
f81ab074 6574static const struct file_operations tracing_total_entries_fops = {
7b85af63 6575 .open = tracing_open_generic_tr,
f81ab074
VN
6576 .read = tracing_total_entries_read,
6577 .llseek = generic_file_llseek,
7b85af63 6578 .release = tracing_release_generic_tr,
f81ab074
VN
6579};
6580
4f271a2a 6581static const struct file_operations tracing_free_buffer_fops = {
7b85af63 6582 .open = tracing_open_generic_tr,
4f271a2a
VN
6583 .write = tracing_free_buffer_write,
6584 .release = tracing_free_buffer_release,
6585};
6586
5e2336a0 6587static const struct file_operations tracing_mark_fops = {
7b85af63 6588 .open = tracing_open_generic_tr,
5bf9a1ee 6589 .write = tracing_mark_write,
b444786f 6590 .llseek = generic_file_llseek,
7b85af63 6591 .release = tracing_release_generic_tr,
5bf9a1ee
PP
6592};
6593
fa32e855
SR
6594static const struct file_operations tracing_mark_raw_fops = {
6595 .open = tracing_open_generic_tr,
6596 .write = tracing_mark_raw_write,
6597 .llseek = generic_file_llseek,
6598 .release = tracing_release_generic_tr,
6599};
6600
5079f326 6601static const struct file_operations trace_clock_fops = {
13f16d20
LZ
6602 .open = tracing_clock_open,
6603 .read = seq_read,
6604 .llseek = seq_lseek,
7b85af63 6605 .release = tracing_single_release_tr,
5079f326
Z
6606 .write = tracing_clock_write,
6607};
6608
2c1ea60b
TZ
6609static const struct file_operations trace_time_stamp_mode_fops = {
6610 .open = tracing_time_stamp_mode_open,
6611 .read = seq_read,
6612 .llseek = seq_lseek,
6613 .release = tracing_single_release_tr,
6614};
6615
debdd57f
HT
6616#ifdef CONFIG_TRACER_SNAPSHOT
6617static const struct file_operations snapshot_fops = {
6618 .open = tracing_snapshot_open,
6619 .read = seq_read,
6620 .write = tracing_snapshot_write,
098c879e 6621 .llseek = tracing_lseek,
2b6080f2 6622 .release = tracing_snapshot_release,
debdd57f 6623};
debdd57f 6624
6de58e62
SRRH
6625static const struct file_operations snapshot_raw_fops = {
6626 .open = snapshot_raw_open,
6627 .read = tracing_buffers_read,
6628 .release = tracing_buffers_release,
6629 .splice_read = tracing_buffers_splice_read,
6630 .llseek = no_llseek,
2cadf913
SR
6631};
6632
6de58e62
SRRH
6633#endif /* CONFIG_TRACER_SNAPSHOT */
6634
2cadf913
SR
6635static int tracing_buffers_open(struct inode *inode, struct file *filp)
6636{
46ef2be0 6637 struct trace_array *tr = inode->i_private;
2cadf913 6638 struct ftrace_buffer_info *info;
7b85af63 6639 int ret;
2cadf913
SR
6640
6641 if (tracing_disabled)
6642 return -ENODEV;
6643
7b85af63
SRRH
6644 if (trace_array_get(tr) < 0)
6645 return -ENODEV;
6646
2cadf913 6647 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
6648 if (!info) {
6649 trace_array_put(tr);
2cadf913 6650 return -ENOMEM;
7b85af63 6651 }
2cadf913 6652
a695cb58
SRRH
6653 mutex_lock(&trace_types_lock);
6654
cc60cdc9 6655 info->iter.tr = tr;
46ef2be0 6656 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 6657 info->iter.trace = tr->current_trace;
12883efb 6658 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 6659 info->spare = NULL;
2cadf913 6660 /* Force reading ring buffer for first read */
cc60cdc9 6661 info->read = (unsigned int)-1;
2cadf913
SR
6662
6663 filp->private_data = info;
6664
cf6ab6d9
SRRH
6665 tr->current_trace->ref++;
6666
a695cb58
SRRH
6667 mutex_unlock(&trace_types_lock);
6668
7b85af63
SRRH
6669 ret = nonseekable_open(inode, filp);
6670 if (ret < 0)
6671 trace_array_put(tr);
6672
6673 return ret;
2cadf913
SR
6674}
6675
9dd95748 6676static __poll_t
cc60cdc9
SR
6677tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6678{
6679 struct ftrace_buffer_info *info = filp->private_data;
6680 struct trace_iterator *iter = &info->iter;
6681
6682 return trace_poll(iter, filp, poll_table);
6683}
6684
2cadf913
SR
6685static ssize_t
6686tracing_buffers_read(struct file *filp, char __user *ubuf,
6687 size_t count, loff_t *ppos)
6688{
6689 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 6690 struct trace_iterator *iter = &info->iter;
a7e52ad7 6691 ssize_t ret = 0;
6de58e62 6692 ssize_t size;
2cadf913 6693
2dc5d12b
SR
6694 if (!count)
6695 return 0;
6696
6de58e62 6697#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
6698 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6699 return -EBUSY;
6de58e62
SRRH
6700#endif
6701
73a757e6 6702 if (!info->spare) {
12883efb
SRRH
6703 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6704 iter->cpu_file);
a7e52ad7
SRV
6705 if (IS_ERR(info->spare)) {
6706 ret = PTR_ERR(info->spare);
6707 info->spare = NULL;
6708 } else {
6709 info->spare_cpu = iter->cpu_file;
6710 }
73a757e6 6711 }
ddd538f3 6712 if (!info->spare)
a7e52ad7 6713 return ret;
ddd538f3 6714
2cadf913
SR
6715 /* Do we have previous read data to read? */
6716 if (info->read < PAGE_SIZE)
6717 goto read;
6718
b627344f 6719 again:
cc60cdc9 6720 trace_access_lock(iter->cpu_file);
12883efb 6721 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
6722 &info->spare,
6723 count,
cc60cdc9
SR
6724 iter->cpu_file, 0);
6725 trace_access_unlock(iter->cpu_file);
2cadf913 6726
b627344f
SR
6727 if (ret < 0) {
6728 if (trace_empty(iter)) {
d716ff71
SRRH
6729 if ((filp->f_flags & O_NONBLOCK))
6730 return -EAGAIN;
6731
e30f53aa 6732 ret = wait_on_pipe(iter, false);
d716ff71
SRRH
6733 if (ret)
6734 return ret;
6735
b627344f
SR
6736 goto again;
6737 }
d716ff71 6738 return 0;
b627344f 6739 }
436fc280 6740
436fc280 6741 info->read = 0;
b627344f 6742 read:
2cadf913
SR
6743 size = PAGE_SIZE - info->read;
6744 if (size > count)
6745 size = count;
6746
6747 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
6748 if (ret == size)
6749 return -EFAULT;
6750
2dc5d12b
SR
6751 size -= ret;
6752
2cadf913
SR
6753 *ppos += size;
6754 info->read += size;
6755
6756 return size;
6757}
6758
6759static int tracing_buffers_release(struct inode *inode, struct file *file)
6760{
6761 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 6762 struct trace_iterator *iter = &info->iter;
2cadf913 6763
a695cb58
SRRH
6764 mutex_lock(&trace_types_lock);
6765
cf6ab6d9
SRRH
6766 iter->tr->current_trace->ref--;
6767
ff451961 6768 __trace_array_put(iter->tr);
2cadf913 6769
ddd538f3 6770 if (info->spare)
73a757e6
SRV
6771 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6772 info->spare_cpu, info->spare);
2cadf913
SR
6773 kfree(info);
6774
a695cb58
SRRH
6775 mutex_unlock(&trace_types_lock);
6776
2cadf913
SR
6777 return 0;
6778}
6779
6780struct buffer_ref {
6781 struct ring_buffer *buffer;
6782 void *page;
73a757e6 6783 int cpu;
2cadf913
SR
6784 int ref;
6785};
6786
6787static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6788 struct pipe_buffer *buf)
6789{
6790 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6791
6792 if (--ref->ref)
6793 return;
6794
73a757e6 6795 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2cadf913
SR
6796 kfree(ref);
6797 buf->private = 0;
6798}
6799
2cadf913
SR
6800static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6801 struct pipe_buffer *buf)
6802{
6803 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6804
6805 ref->ref++;
6806}
6807
6808/* Pipe buffer operations for a buffer. */
28dfef8f 6809static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 6810 .can_merge = 0,
2cadf913
SR
6811 .confirm = generic_pipe_buf_confirm,
6812 .release = buffer_pipe_buf_release,
d55cb6cf 6813 .steal = generic_pipe_buf_steal,
2cadf913
SR
6814 .get = buffer_pipe_buf_get,
6815};
6816
6817/*
6818 * Callback from splice_to_pipe(), if we need to release some pages
 6819 * at the end of the spd in case we errored out while filling the pipe.
6820 */
6821static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6822{
6823 struct buffer_ref *ref =
6824 (struct buffer_ref *)spd->partial[i].private;
6825
6826 if (--ref->ref)
6827 return;
6828
73a757e6 6829 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2cadf913
SR
6830 kfree(ref);
6831 spd->partial[i].private = 0;
6832}
6833
6834static ssize_t
6835tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6836 struct pipe_inode_info *pipe, size_t len,
6837 unsigned int flags)
6838{
6839 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 6840 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
6841 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6842 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 6843 struct splice_pipe_desc spd = {
35f3d14d
JA
6844 .pages = pages_def,
6845 .partial = partial_def,
047fe360 6846 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
6847 .ops = &buffer_pipe_buf_ops,
6848 .spd_release = buffer_spd_release,
6849 };
6850 struct buffer_ref *ref;
6b7e633f 6851 int entries, i;
07906da7 6852 ssize_t ret = 0;
2cadf913 6853
6de58e62 6854#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
6855 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6856 return -EBUSY;
6de58e62
SRRH
6857#endif
6858
d716ff71
SRRH
6859 if (*ppos & (PAGE_SIZE - 1))
6860 return -EINVAL;
93cfb3c9
LJ
6861
6862 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
6863 if (len < PAGE_SIZE)
6864 return -EINVAL;
93cfb3c9
LJ
6865 len &= PAGE_MASK;
6866 }
6867
1ae2293d
AV
6868 if (splice_grow_spd(pipe, &spd))
6869 return -ENOMEM;
6870
cc60cdc9
SR
6871 again:
6872 trace_access_lock(iter->cpu_file);
12883efb 6873 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 6874
a786c06d 6875 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
6876 struct page *page;
6877 int r;
6878
6879 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
6880 if (!ref) {
6881 ret = -ENOMEM;
2cadf913 6882 break;
07906da7 6883 }
2cadf913 6884
7267fa68 6885 ref->ref = 1;
12883efb 6886 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 6887 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
a7e52ad7
SRV
6888 if (IS_ERR(ref->page)) {
6889 ret = PTR_ERR(ref->page);
6890 ref->page = NULL;
2cadf913
SR
6891 kfree(ref);
6892 break;
6893 }
73a757e6 6894 ref->cpu = iter->cpu_file;
2cadf913
SR
6895
6896 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 6897 len, iter->cpu_file, 1);
2cadf913 6898 if (r < 0) {
73a757e6
SRV
6899 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6900 ref->page);
2cadf913
SR
6901 kfree(ref);
6902 break;
6903 }
6904
2cadf913
SR
6905 page = virt_to_page(ref->page);
6906
6907 spd.pages[i] = page;
6908 spd.partial[i].len = PAGE_SIZE;
6909 spd.partial[i].offset = 0;
6910 spd.partial[i].private = (unsigned long)ref;
6911 spd.nr_pages++;
93cfb3c9 6912 *ppos += PAGE_SIZE;
93459c6c 6913
12883efb 6914 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
6915 }
6916
cc60cdc9 6917 trace_access_unlock(iter->cpu_file);
2cadf913
SR
6918 spd.nr_pages = i;
6919
6920 /* did we read anything? */
6921 if (!spd.nr_pages) {
07906da7 6922 if (ret)
1ae2293d 6923 goto out;
d716ff71 6924
1ae2293d 6925 ret = -EAGAIN;
d716ff71 6926 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
1ae2293d 6927 goto out;
07906da7 6928
e30f53aa 6929 ret = wait_on_pipe(iter, true);
8b8b3683 6930 if (ret)
1ae2293d 6931 goto out;
e30f53aa 6932
cc60cdc9 6933 goto again;
2cadf913
SR
6934 }
6935
6936 ret = splice_to_pipe(pipe, &spd);
1ae2293d 6937out:
047fe360 6938 splice_shrink_spd(&spd);
6de58e62 6939
2cadf913
SR
6940 return ret;
6941}
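/*
 * Note on the raw buffer reader above (typically exposed as
 * per_cpu/cpuX/trace_pipe_raw): it hands whole ring-buffer pages to
 * user space, so splice offsets must be page aligned and requests
 * shorter than PAGE_SIZE are rejected with -EINVAL, as enforced at the
 * top of tracing_buffers_splice_read().  Tools that record the binary
 * ring-buffer format consume this interface rather than trace_pipe.
 */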
6942
6943static const struct file_operations tracing_buffers_fops = {
6944 .open = tracing_buffers_open,
6945 .read = tracing_buffers_read,
cc60cdc9 6946 .poll = tracing_buffers_poll,
2cadf913
SR
6947 .release = tracing_buffers_release,
6948 .splice_read = tracing_buffers_splice_read,
6949 .llseek = no_llseek,
6950};
6951
c8d77183
SR
6952static ssize_t
6953tracing_stats_read(struct file *filp, char __user *ubuf,
6954 size_t count, loff_t *ppos)
6955{
4d3435b8
ON
6956 struct inode *inode = file_inode(filp);
6957 struct trace_array *tr = inode->i_private;
12883efb 6958 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 6959 int cpu = tracing_get_cpu(inode);
c8d77183
SR
6960 struct trace_seq *s;
6961 unsigned long cnt;
c64e148a
VN
6962 unsigned long long t;
6963 unsigned long usec_rem;
c8d77183 6964
e4f2d10f 6965 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 6966 if (!s)
a646365c 6967 return -ENOMEM;
c8d77183
SR
6968
6969 trace_seq_init(s);
6970
12883efb 6971 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6972 trace_seq_printf(s, "entries: %ld\n", cnt);
6973
12883efb 6974 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6975 trace_seq_printf(s, "overrun: %ld\n", cnt);
6976
12883efb 6977 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6978 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6979
12883efb 6980 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
6981 trace_seq_printf(s, "bytes: %ld\n", cnt);
6982
58e8eedf 6983 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 6984 /* local or global for trace_clock */
12883efb 6985 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
6986 usec_rem = do_div(t, USEC_PER_SEC);
6987 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6988 t, usec_rem);
6989
12883efb 6990 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
6991 usec_rem = do_div(t, USEC_PER_SEC);
6992 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6993 } else {
6994 /* counter or tsc mode for trace_clock */
6995 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 6996 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 6997
11043d8b 6998 trace_seq_printf(s, "now ts: %llu\n",
12883efb 6999 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 7000 }
c64e148a 7001
12883efb 7002 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
7003 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7004
12883efb 7005 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
7006 trace_seq_printf(s, "read events: %ld\n", cnt);
7007
5ac48378
SRRH
7008 count = simple_read_from_buffer(ubuf, count, ppos,
7009 s->buffer, trace_seq_used(s));
c8d77183
SR
7010
7011 kfree(s);
7012
7013 return count;
7014}
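/*
 * Illustrative per-cpu stats output produced by the function above
 * (numbers are examples only):
 *
 *   entries: 1024
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 65536
 *   oldest event ts:  1123.456789
 *   now ts:  1124.000123
 *   dropped events: 0
 *   read events: 512
 */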
7015
7016static const struct file_operations tracing_stats_fops = {
4d3435b8 7017 .open = tracing_open_generic_tr,
c8d77183 7018 .read = tracing_stats_read,
b444786f 7019 .llseek = generic_file_llseek,
4d3435b8 7020 .release = tracing_release_generic_tr,
c8d77183
SR
7021};
7022
bc0c38d1
SR
7023#ifdef CONFIG_DYNAMIC_FTRACE
7024
7025static ssize_t
b807c3d0 7026tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
7027 size_t cnt, loff_t *ppos)
7028{
7029 unsigned long *p = filp->private_data;
6a9c981b 7030 char buf[64]; /* Not too big for a shallow stack */
bc0c38d1
SR
7031 int r;
7032
6a9c981b 7033 r = scnprintf(buf, 63, "%ld", *p);
b807c3d0
SR
7034 buf[r++] = '\n';
7035
6a9c981b 7036 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
7037}
7038
5e2336a0 7039static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 7040 .open = tracing_open_generic,
b807c3d0 7041 .read = tracing_read_dyn_info,
b444786f 7042 .llseek = generic_file_llseek,
bc0c38d1 7043};
77fd5c15 7044#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 7045
77fd5c15
SRRH
7046#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7047static void
bca6c8d0 7048ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
b5f081b5 7049 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 7050 void *data)
77fd5c15 7051{
cab50379 7052 tracing_snapshot_instance(tr);
77fd5c15 7053}
bc0c38d1 7054
77fd5c15 7055static void
bca6c8d0 7056ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
b5f081b5 7057 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 7058 void *data)
bc0c38d1 7059{
6e444319 7060 struct ftrace_func_mapper *mapper = data;
1a93f8bd 7061 long *count = NULL;
77fd5c15 7062
1a93f8bd
SRV
7063 if (mapper)
7064 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7065
7066 if (count) {
7067
7068 if (*count <= 0)
7069 return;
bc0c38d1 7070
77fd5c15 7071 (*count)--;
1a93f8bd 7072 }
77fd5c15 7073
cab50379 7074 tracing_snapshot_instance(tr);
77fd5c15
SRRH
7075}
7076
7077static int
7078ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7079 struct ftrace_probe_ops *ops, void *data)
7080{
6e444319 7081 struct ftrace_func_mapper *mapper = data;
1a93f8bd 7082 long *count = NULL;
77fd5c15
SRRH
7083
7084 seq_printf(m, "%ps:", (void *)ip);
7085
fa6f0cc7 7086 seq_puts(m, "snapshot");
77fd5c15 7087
1a93f8bd
SRV
7088 if (mapper)
7089 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7090
7091 if (count)
7092 seq_printf(m, ":count=%ld\n", *count);
77fd5c15 7093 else
1a93f8bd 7094 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
7095
7096 return 0;
7097}
7098
1a93f8bd 7099static int
b5f081b5 7100ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 7101 unsigned long ip, void *init_data, void **data)
1a93f8bd 7102{
6e444319
SRV
7103 struct ftrace_func_mapper *mapper = *data;
7104
7105 if (!mapper) {
7106 mapper = allocate_ftrace_func_mapper();
7107 if (!mapper)
7108 return -ENOMEM;
7109 *data = mapper;
7110 }
1a93f8bd 7111
6e444319 7112 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
1a93f8bd
SRV
7113}
7114
7115static void
b5f081b5 7116ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 7117 unsigned long ip, void *data)
1a93f8bd 7118{
6e444319
SRV
7119 struct ftrace_func_mapper *mapper = data;
7120
7121 if (!ip) {
7122 if (!mapper)
7123 return;
7124 free_ftrace_func_mapper(mapper, NULL);
7125 return;
7126 }
1a93f8bd
SRV
7127
7128 ftrace_func_mapper_remove_ip(mapper, ip);
7129}
7130
77fd5c15
SRRH
7131static struct ftrace_probe_ops snapshot_probe_ops = {
7132 .func = ftrace_snapshot,
7133 .print = ftrace_snapshot_print,
7134};
7135
7136static struct ftrace_probe_ops snapshot_count_probe_ops = {
7137 .func = ftrace_count_snapshot,
7138 .print = ftrace_snapshot_print,
1a93f8bd
SRV
7139 .init = ftrace_snapshot_init,
7140 .free = ftrace_snapshot_free,
77fd5c15
SRRH
7141};
7142
7143static int
04ec7bb6 7144ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
77fd5c15
SRRH
7145 char *glob, char *cmd, char *param, int enable)
7146{
7147 struct ftrace_probe_ops *ops;
7148 void *count = (void *)-1;
7149 char *number;
7150 int ret;
7151
0f179765
SRV
7152 if (!tr)
7153 return -ENODEV;
7154
77fd5c15
SRRH
7155 /* hash funcs only work with set_ftrace_filter */
7156 if (!enable)
7157 return -EINVAL;
7158
7159 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7160
d3d532d7 7161 if (glob[0] == '!')
7b60f3d8 7162 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
77fd5c15
SRRH
7163
7164 if (!param)
7165 goto out_reg;
7166
7167 number = strsep(&param, ":");
7168
7169 if (!strlen(number))
7170 goto out_reg;
7171
7172 /*
7173 * We use the callback data field (which is a pointer)
7174 * as our counter.
7175 */
7176 ret = kstrtoul(number, 0, (unsigned long *)&count);
7177 if (ret)
7178 return ret;
7179
7180 out_reg:
4c174688 7181 ret = alloc_snapshot(tr);
df62db5b
SRV
7182 if (ret < 0)
7183 goto out;
77fd5c15 7184
4c174688 7185 ret = register_ftrace_function_probe(glob, tr, ops, count);
77fd5c15 7186
df62db5b 7187 out:
77fd5c15
SRRH
7188 return ret < 0 ? ret : 0;
7189}
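/*
 * Usage sketch for the "snapshot" function command parsed above
 * (paths assume tracefs mounted at /sys/kernel/tracing):
 *   echo 'schedule:snapshot' > set_ftrace_filter     # snapshot on every hit
 *   echo 'schedule:snapshot:5' > set_ftrace_filter   # only the first 5 hits
 *   echo '!schedule:snapshot' > set_ftrace_filter    # remove the probe
 * The optional ":count" suffix is what selects snapshot_count_probe_ops
 * instead of snapshot_probe_ops.
 */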
7190
7191static struct ftrace_func_command ftrace_snapshot_cmd = {
7192 .name = "snapshot",
7193 .func = ftrace_trace_snapshot_callback,
7194};
7195
38de93ab 7196static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
7197{
7198 return register_ftrace_command(&ftrace_snapshot_cmd);
7199}
7200#else
38de93ab 7201static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 7202#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 7203
7eeafbca 7204static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 7205{
8434dc93
SRRH
7206 if (WARN_ON(!tr->dir))
7207 return ERR_PTR(-ENODEV);
7208
7209 /* Top directory uses NULL as the parent */
7210 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7211 return NULL;
7212
7213 /* All sub buffers have a descriptor */
2b6080f2 7214 return tr->dir;
bc0c38d1
SR
7215}
7216
2b6080f2 7217static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 7218{
b04cc6b1
FW
7219 struct dentry *d_tracer;
7220
2b6080f2
SR
7221 if (tr->percpu_dir)
7222 return tr->percpu_dir;
b04cc6b1 7223
7eeafbca 7224 d_tracer = tracing_get_dentry(tr);
14a5ae40 7225 if (IS_ERR(d_tracer))
b04cc6b1
FW
7226 return NULL;
7227
8434dc93 7228 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 7229
2b6080f2 7230 WARN_ONCE(!tr->percpu_dir,
8434dc93 7231 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 7232
2b6080f2 7233 return tr->percpu_dir;
b04cc6b1
FW
7234}
7235
649e9c70
ON
7236static struct dentry *
7237trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7238 void *data, long cpu, const struct file_operations *fops)
7239{
7240 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7241
7242 if (ret) /* See tracing_get_cpu() */
7682c918 7243 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
7244 return ret;
7245}
7246
2b6080f2 7247static void
8434dc93 7248tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 7249{
2b6080f2 7250 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 7251 struct dentry *d_cpu;
dd49a38c 7252 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 7253
0a3d7ce7
NK
7254 if (!d_percpu)
7255 return;
7256
dd49a38c 7257 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 7258 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 7259 if (!d_cpu) {
a395d6a7 7260 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
7261 return;
7262 }
b04cc6b1 7263
8656e7a2 7264 /* per cpu trace_pipe */
649e9c70 7265 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 7266 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
7267
7268 /* per cpu trace */
649e9c70 7269 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 7270 tr, cpu, &tracing_fops);
7f96f93f 7271
649e9c70 7272 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 7273 tr, cpu, &tracing_buffers_fops);
7f96f93f 7274
649e9c70 7275 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 7276 tr, cpu, &tracing_stats_fops);
438ced17 7277
649e9c70 7278 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 7279 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
7280
7281#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 7282 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 7283 tr, cpu, &snapshot_fops);
6de58e62 7284
649e9c70 7285 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 7286 tr, cpu, &snapshot_raw_fops);
f1affcaa 7287#endif
b04cc6b1
FW
7288}
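/*
 * Resulting layout sketch for each tracing CPU N:
 *   <tracefs>/per_cpu/cpuN/{trace, trace_pipe, trace_pipe_raw, stats,
 *                           buffer_size_kb[, snapshot, snapshot_raw]}
 * where the snapshot files only appear with CONFIG_TRACER_SNAPSHOT.
 */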
7289
60a11774
SR
7290#ifdef CONFIG_FTRACE_SELFTEST
7291/* Let selftest have access to static functions in this file */
7292#include "trace_selftest.c"
7293#endif
7294
577b785f
SR
7295static ssize_t
7296trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7297 loff_t *ppos)
7298{
7299 struct trace_option_dentry *topt = filp->private_data;
7300 char *buf;
7301
7302 if (topt->flags->val & topt->opt->bit)
7303 buf = "1\n";
7304 else
7305 buf = "0\n";
7306
7307 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7308}
7309
7310static ssize_t
7311trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7312 loff_t *ppos)
7313{
7314 struct trace_option_dentry *topt = filp->private_data;
7315 unsigned long val;
577b785f
SR
7316 int ret;
7317
22fe9b54
PH
7318 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7319 if (ret)
577b785f
SR
7320 return ret;
7321
8d18eaaf
LZ
7322 if (val != 0 && val != 1)
7323 return -EINVAL;
577b785f 7324
8d18eaaf 7325 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 7326 mutex_lock(&trace_types_lock);
8c1a49ae 7327 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 7328 topt->opt, !val);
577b785f
SR
7329 mutex_unlock(&trace_types_lock);
7330 if (ret)
7331 return ret;
577b785f
SR
7332 }
7333
7334 *ppos += cnt;
7335
7336 return cnt;
7337}
7338
7339
7340static const struct file_operations trace_options_fops = {
7341 .open = tracing_open_generic,
7342 .read = trace_options_read,
7343 .write = trace_options_write,
b444786f 7344 .llseek = generic_file_llseek,
577b785f
SR
7345};
7346
9a38a885
SRRH
7347/*
7348 * In order to pass in both the trace_array descriptor as well as the index
7349 * to the flag that the trace option file represents, the trace_array
7350 * has a character array of trace_flags_index[], which holds the index
7351 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7352 * The address of the array element for that flag is passed to the flag
7353 * option file's read/write callbacks.
7354 *
7355 * In order to extract both the index and the trace_array descriptor,
7356 * get_tr_index() uses the following algorithm.
7357 *
7358 * idx = *ptr;
7359 *
7360 * As the value stored at that address is the index itself (remember
7361 * index[1] == 1).
7362 *
7363 * Then to get the trace_array descriptor, by subtracting that index
7364 * from the ptr, we get to the start of the array itself.
7365 *
7366 * ptr - idx == &index[0]
7367 *
7368 * Then a simple container_of() from that pointer gets us to the
7369 * trace_array descriptor.
7370 */
7371static void get_tr_index(void *data, struct trace_array **ptr,
7372 unsigned int *pindex)
7373{
7374 *pindex = *(unsigned char *)data;
7375
7376 *ptr = container_of(data - *pindex, struct trace_array,
7377 trace_flags_index);
7378}
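/*
 * Illustrative walk-through of the scheme above (values are examples):
 * the option file for flag 5 is created with &tr->trace_flags_index[5]
 * as its private_data.  Because trace_flags_index[5] == 5, get_tr_index()
 * reads idx = 5, steps back "data - 5" to &tr->trace_flags_index[0], and
 * container_of() on that address recovers the enclosing trace_array.
 */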
7379
a8259075
SR
7380static ssize_t
7381trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7382 loff_t *ppos)
7383{
9a38a885
SRRH
7384 void *tr_index = filp->private_data;
7385 struct trace_array *tr;
7386 unsigned int index;
a8259075
SR
7387 char *buf;
7388
9a38a885
SRRH
7389 get_tr_index(tr_index, &tr, &index);
7390
7391 if (tr->trace_flags & (1 << index))
a8259075
SR
7392 buf = "1\n";
7393 else
7394 buf = "0\n";
7395
7396 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7397}
7398
7399static ssize_t
7400trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7401 loff_t *ppos)
7402{
9a38a885
SRRH
7403 void *tr_index = filp->private_data;
7404 struct trace_array *tr;
7405 unsigned int index;
a8259075
SR
7406 unsigned long val;
7407 int ret;
7408
9a38a885
SRRH
7409 get_tr_index(tr_index, &tr, &index);
7410
22fe9b54
PH
7411 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7412 if (ret)
a8259075
SR
7413 return ret;
7414
f2d84b65 7415 if (val != 0 && val != 1)
a8259075 7416 return -EINVAL;
69d34da2
SRRH
7417
7418 mutex_lock(&trace_types_lock);
2b6080f2 7419 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 7420 mutex_unlock(&trace_types_lock);
a8259075 7421
613f04a0
SRRH
7422 if (ret < 0)
7423 return ret;
7424
a8259075
SR
7425 *ppos += cnt;
7426
7427 return cnt;
7428}
7429
a8259075
SR
7430static const struct file_operations trace_options_core_fops = {
7431 .open = tracing_open_generic,
7432 .read = trace_options_core_read,
7433 .write = trace_options_core_write,
b444786f 7434 .llseek = generic_file_llseek,
a8259075
SR
7435};
7436
5452af66 7437struct dentry *trace_create_file(const char *name,
f4ae40a6 7438 umode_t mode,
5452af66
FW
7439 struct dentry *parent,
7440 void *data,
7441 const struct file_operations *fops)
7442{
7443 struct dentry *ret;
7444
8434dc93 7445 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 7446 if (!ret)
a395d6a7 7447 pr_warn("Could not create tracefs '%s' entry\n", name);
5452af66
FW
7448
7449 return ret;
7450}
7451
7452
2b6080f2 7453static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
7454{
7455 struct dentry *d_tracer;
a8259075 7456
2b6080f2
SR
7457 if (tr->options)
7458 return tr->options;
a8259075 7459
7eeafbca 7460 d_tracer = tracing_get_dentry(tr);
14a5ae40 7461 if (IS_ERR(d_tracer))
a8259075
SR
7462 return NULL;
7463
8434dc93 7464 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 7465 if (!tr->options) {
a395d6a7 7466 pr_warn("Could not create tracefs directory 'options'\n");
a8259075
SR
7467 return NULL;
7468 }
7469
2b6080f2 7470 return tr->options;
a8259075
SR
7471}
7472
577b785f 7473static void
2b6080f2
SR
7474create_trace_option_file(struct trace_array *tr,
7475 struct trace_option_dentry *topt,
577b785f
SR
7476 struct tracer_flags *flags,
7477 struct tracer_opt *opt)
7478{
7479 struct dentry *t_options;
577b785f 7480
2b6080f2 7481 t_options = trace_options_init_dentry(tr);
577b785f
SR
7482 if (!t_options)
7483 return;
7484
7485 topt->flags = flags;
7486 topt->opt = opt;
2b6080f2 7487 topt->tr = tr;
577b785f 7488
5452af66 7489 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
7490 &trace_options_fops);
7491
577b785f
SR
7492}
7493
37aea98b 7494static void
2b6080f2 7495create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
7496{
7497 struct trace_option_dentry *topts;
37aea98b 7498 struct trace_options *tr_topts;
577b785f
SR
7499 struct tracer_flags *flags;
7500 struct tracer_opt *opts;
7501 int cnt;
37aea98b 7502 int i;
577b785f
SR
7503
7504 if (!tracer)
37aea98b 7505 return;
577b785f
SR
7506
7507 flags = tracer->flags;
7508
7509 if (!flags || !flags->opts)
37aea98b
SRRH
7510 return;
7511
7512 /*
7513 * If this is an instance, only create flags for tracers
7514 * the instance may have.
7515 */
7516 if (!trace_ok_for_array(tracer, tr))
7517 return;
7518
7519 for (i = 0; i < tr->nr_topts; i++) {
d39cdd20
CH
7520 /* Make sure there are no duplicate flags. */
7521 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
37aea98b
SRRH
7522 return;
7523 }
577b785f
SR
7524
7525 opts = flags->opts;
7526
7527 for (cnt = 0; opts[cnt].name; cnt++)
7528 ;
7529
0cfe8245 7530 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f 7531 if (!topts)
37aea98b
SRRH
7532 return;
7533
7534 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7535 GFP_KERNEL);
7536 if (!tr_topts) {
7537 kfree(topts);
7538 return;
7539 }
7540
7541 tr->topts = tr_topts;
7542 tr->topts[tr->nr_topts].tracer = tracer;
7543 tr->topts[tr->nr_topts].topts = topts;
7544 tr->nr_topts++;
577b785f 7545
41d9c0be 7546 for (cnt = 0; opts[cnt].name; cnt++) {
2b6080f2 7547 create_trace_option_file(tr, &topts[cnt], flags,
577b785f 7548 &opts[cnt]);
41d9c0be
SRRH
7549 WARN_ONCE(topts[cnt].entry == NULL,
7550 "Failed to create trace option: %s",
7551 opts[cnt].name);
7552 }
577b785f
SR
7553}
7554
a8259075 7555static struct dentry *
2b6080f2
SR
7556create_trace_option_core_file(struct trace_array *tr,
7557 const char *option, long index)
a8259075
SR
7558{
7559 struct dentry *t_options;
a8259075 7560
2b6080f2 7561 t_options = trace_options_init_dentry(tr);
a8259075
SR
7562 if (!t_options)
7563 return NULL;
7564
9a38a885
SRRH
7565 return trace_create_file(option, 0644, t_options,
7566 (void *)&tr->trace_flags_index[index],
7567 &trace_options_core_fops);
a8259075
SR
7568}
7569
16270145 7570static void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
7571{
7572 struct dentry *t_options;
16270145 7573 bool top_level = tr == &global_trace;
a8259075
SR
7574 int i;
7575
2b6080f2 7576 t_options = trace_options_init_dentry(tr);
a8259075
SR
7577 if (!t_options)
7578 return;
7579
16270145
SRRH
7580 for (i = 0; trace_options[i]; i++) {
7581 if (top_level ||
7582 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7583 create_trace_option_core_file(tr, trace_options[i], i);
7584 }
a8259075
SR
7585}
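/*
 * Usage sketch (path assumes tracefs at /sys/kernel/tracing): each core
 * flag becomes a file under the "options" directory, e.g.
 *   echo 1 > /sys/kernel/tracing/options/sym-addr
 * which lands in trace_options_core_write() and then set_tracer_flag().
 */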
7586
499e5470
SR
7587static ssize_t
7588rb_simple_read(struct file *filp, char __user *ubuf,
7589 size_t cnt, loff_t *ppos)
7590{
348f0fc2 7591 struct trace_array *tr = filp->private_data;
499e5470
SR
7592 char buf[64];
7593 int r;
7594
10246fa3 7595 r = tracer_tracing_is_on(tr);
499e5470
SR
7596 r = sprintf(buf, "%d\n", r);
7597
7598 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7599}
7600
7601static ssize_t
7602rb_simple_write(struct file *filp, const char __user *ubuf,
7603 size_t cnt, loff_t *ppos)
7604{
348f0fc2 7605 struct trace_array *tr = filp->private_data;
12883efb 7606 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
7607 unsigned long val;
7608 int ret;
7609
7610 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7611 if (ret)
7612 return ret;
7613
7614 if (buffer) {
2df8f8a6
SR
7615 mutex_lock(&trace_types_lock);
7616 if (val) {
10246fa3 7617 tracer_tracing_on(tr);
2b6080f2
SR
7618 if (tr->current_trace->start)
7619 tr->current_trace->start(tr);
2df8f8a6 7620 } else {
10246fa3 7621 tracer_tracing_off(tr);
2b6080f2
SR
7622 if (tr->current_trace->stop)
7623 tr->current_trace->stop(tr);
2df8f8a6
SR
7624 }
7625 mutex_unlock(&trace_types_lock);
499e5470
SR
7626 }
7627
7628 (*ppos)++;
7629
7630 return cnt;
7631}
7632
7633static const struct file_operations rb_simple_fops = {
7b85af63 7634 .open = tracing_open_generic_tr,
499e5470
SR
7635 .read = rb_simple_read,
7636 .write = rb_simple_write,
7b85af63 7637 .release = tracing_release_generic_tr,
499e5470
SR
7638 .llseek = default_llseek,
7639};
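/*
 * Usage sketch: this backs the per-array "tracing_on" file, e.g.
 *   echo 0 > /sys/kernel/tracing/tracing_on   # stop recording, keep buffer
 *   echo 1 > /sys/kernel/tracing/tracing_on   # resume recording
 * Writing also calls the current tracer's start()/stop() hooks when
 * they are provided.
 */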
7640
277ba044
SR
7641struct dentry *trace_instance_dir;
7642
7643static void
8434dc93 7644init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 7645
55034cd6
SRRH
7646static int
7647allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
7648{
7649 enum ring_buffer_flags rb_flags;
737223fb 7650
983f938a 7651 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
737223fb 7652
dced341b
SRRH
7653 buf->tr = tr;
7654
55034cd6
SRRH
7655 buf->buffer = ring_buffer_alloc(size, rb_flags);
7656 if (!buf->buffer)
7657 return -ENOMEM;
737223fb 7658
55034cd6
SRRH
7659 buf->data = alloc_percpu(struct trace_array_cpu);
7660 if (!buf->data) {
7661 ring_buffer_free(buf->buffer);
4397f045 7662 buf->buffer = NULL;
55034cd6
SRRH
7663 return -ENOMEM;
7664 }
737223fb 7665
737223fb
SRRH
7666 /* Allocate the first page for all buffers */
7667 set_buffer_entries(&tr->trace_buffer,
7668 ring_buffer_size(tr->trace_buffer.buffer, 0));
7669
55034cd6
SRRH
7670 return 0;
7671}
737223fb 7672
55034cd6
SRRH
7673static int allocate_trace_buffers(struct trace_array *tr, int size)
7674{
7675 int ret;
737223fb 7676
55034cd6
SRRH
7677 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7678 if (ret)
7679 return ret;
737223fb 7680
55034cd6
SRRH
7681#ifdef CONFIG_TRACER_MAX_TRACE
7682 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7683 allocate_snapshot ? size : 1);
7684 if (WARN_ON(ret)) {
737223fb 7685 ring_buffer_free(tr->trace_buffer.buffer);
24f2aaf9 7686 tr->trace_buffer.buffer = NULL;
55034cd6 7687 free_percpu(tr->trace_buffer.data);
24f2aaf9 7688 tr->trace_buffer.data = NULL;
55034cd6
SRRH
7689 return -ENOMEM;
7690 }
7691 tr->allocated_snapshot = allocate_snapshot;
737223fb 7692
55034cd6
SRRH
7693 /*
7694 * Only the top level trace array gets its snapshot allocated
7695 * from the kernel command line.
7696 */
7697 allocate_snapshot = false;
737223fb 7698#endif
55034cd6 7699 return 0;
737223fb
SRRH
7700}
7701
f0b70cc4
SRRH
7702static void free_trace_buffer(struct trace_buffer *buf)
7703{
7704 if (buf->buffer) {
7705 ring_buffer_free(buf->buffer);
7706 buf->buffer = NULL;
7707 free_percpu(buf->data);
7708 buf->data = NULL;
7709 }
7710}
7711
23aaa3c1
SRRH
7712static void free_trace_buffers(struct trace_array *tr)
7713{
7714 if (!tr)
7715 return;
7716
f0b70cc4 7717 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
7718
7719#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 7720 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
7721#endif
7722}
7723
9a38a885
SRRH
7724static void init_trace_flags_index(struct trace_array *tr)
7725{
7726 int i;
7727
7728 /* Used by the trace options files */
7729 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7730 tr->trace_flags_index[i] = i;
7731}
7732
37aea98b
SRRH
7733static void __update_tracer_options(struct trace_array *tr)
7734{
7735 struct tracer *t;
7736
7737 for (t = trace_types; t; t = t->next)
7738 add_tracer_options(tr, t);
7739}
7740
7741static void update_tracer_options(struct trace_array *tr)
7742{
7743 mutex_lock(&trace_types_lock);
7744 __update_tracer_options(tr);
7745 mutex_unlock(&trace_types_lock);
7746}
7747
eae47358 7748static int instance_mkdir(const char *name)
737223fb 7749{
277ba044
SR
7750 struct trace_array *tr;
7751 int ret;
277ba044 7752
12ecef0c 7753 mutex_lock(&event_mutex);
277ba044
SR
7754 mutex_lock(&trace_types_lock);
7755
7756 ret = -EEXIST;
7757 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7758 if (tr->name && strcmp(tr->name, name) == 0)
7759 goto out_unlock;
7760 }
7761
7762 ret = -ENOMEM;
7763 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7764 if (!tr)
7765 goto out_unlock;
7766
7767 tr->name = kstrdup(name, GFP_KERNEL);
7768 if (!tr->name)
7769 goto out_free_tr;
7770
ccfe9e42
AL
7771 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7772 goto out_free_tr;
7773
20550622 7774 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
983f938a 7775
ccfe9e42
AL
7776 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7777
277ba044
SR
7778 raw_spin_lock_init(&tr->start_lock);
7779
0b9b12c1
SRRH
7780 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7781
277ba044
SR
7782 tr->current_trace = &nop_trace;
7783
7784 INIT_LIST_HEAD(&tr->systems);
7785 INIT_LIST_HEAD(&tr->events);
067fe038 7786 INIT_LIST_HEAD(&tr->hist_vars);
277ba044 7787
737223fb 7788 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
7789 goto out_free_tr;
7790
8434dc93 7791 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
7792 if (!tr->dir)
7793 goto out_free_tr;
7794
7795 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 7796 if (ret) {
8434dc93 7797 tracefs_remove_recursive(tr->dir);
277ba044 7798 goto out_free_tr;
609e85a7 7799 }
277ba044 7800
04ec7bb6
SRV
7801 ftrace_init_trace_array(tr);
7802
8434dc93 7803 init_tracer_tracefs(tr, tr->dir);
9a38a885 7804 init_trace_flags_index(tr);
37aea98b 7805 __update_tracer_options(tr);
277ba044
SR
7806
7807 list_add(&tr->list, &ftrace_trace_arrays);
7808
7809 mutex_unlock(&trace_types_lock);
12ecef0c 7810 mutex_unlock(&event_mutex);
277ba044
SR
7811
7812 return 0;
7813
7814 out_free_tr:
23aaa3c1 7815 free_trace_buffers(tr);
ccfe9e42 7816 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
7817 kfree(tr->name);
7818 kfree(tr);
7819
7820 out_unlock:
7821 mutex_unlock(&trace_types_lock);
12ecef0c 7822 mutex_unlock(&event_mutex);
277ba044
SR
7823
7824 return ret;
7825
7826}
7827
eae47358 7828static int instance_rmdir(const char *name)
0c8916c3
SR
7829{
7830 struct trace_array *tr;
7831 int found = 0;
7832 int ret;
37aea98b 7833 int i;
0c8916c3 7834
12ecef0c 7835 mutex_lock(&event_mutex);
0c8916c3
SR
7836 mutex_lock(&trace_types_lock);
7837
7838 ret = -ENODEV;
7839 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7840 if (tr->name && strcmp(tr->name, name) == 0) {
7841 found = 1;
7842 break;
7843 }
7844 }
7845 if (!found)
7846 goto out_unlock;
7847
a695cb58 7848 ret = -EBUSY;
cf6ab6d9 7849 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
7850 goto out_unlock;
7851
0c8916c3
SR
7852 list_del(&tr->list);
7853
20550622
SRRH
7854 /* Disable all the flags that were enabled coming in */
7855 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7856 if ((1 << i) & ZEROED_TRACE_FLAGS)
7857 set_tracer_flag(tr, 1 << i, 0);
7858 }
7859
6b450d25 7860 tracing_set_nop(tr);
a0e6369e 7861 clear_ftrace_function_probes(tr);
0c8916c3 7862 event_trace_del_tracer(tr);
d879d0b8 7863 ftrace_clear_pids(tr);
591dffda 7864 ftrace_destroy_function_files(tr);
681a4a2f 7865 tracefs_remove_recursive(tr->dir);
a9fcaaac 7866 free_trace_buffers(tr);
0c8916c3 7867
37aea98b
SRRH
7868 for (i = 0; i < tr->nr_topts; i++) {
7869 kfree(tr->topts[i].topts);
7870 }
7871 kfree(tr->topts);
7872
db9108e0 7873 free_cpumask_var(tr->tracing_cpumask);
0c8916c3
SR
7874 kfree(tr->name);
7875 kfree(tr);
7876
7877 ret = 0;
7878
7879 out_unlock:
7880 mutex_unlock(&trace_types_lock);
12ecef0c 7881 mutex_unlock(&event_mutex);
0c8916c3
SR
7882
7883 return ret;
7884}
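/*
 * Usage sketch: instances are created and destroyed with plain
 * mkdir/rmdir on the instances directory, e.g.
 *   mkdir /sys/kernel/tracing/instances/foo
 *   rmdir /sys/kernel/tracing/instances/foo
 * instance_rmdir() refuses with -EBUSY while the trace array or its
 * current tracer still holds a reference.
 */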
7885
277ba044
SR
7886static __init void create_trace_instances(struct dentry *d_tracer)
7887{
eae47358
SRRH
7888 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7889 instance_mkdir,
7890 instance_rmdir);
277ba044
SR
7891 if (WARN_ON(!trace_instance_dir))
7892 return;
277ba044
SR
7893}
7894
2b6080f2 7895static void
8434dc93 7896init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 7897{
121aaee7 7898 int cpu;
2b6080f2 7899
607e2ea1
SRRH
7900 trace_create_file("available_tracers", 0444, d_tracer,
7901 tr, &show_traces_fops);
7902
7903 trace_create_file("current_tracer", 0644, d_tracer,
7904 tr, &set_tracer_fops);
7905
ccfe9e42
AL
7906 trace_create_file("tracing_cpumask", 0644, d_tracer,
7907 tr, &tracing_cpumask_fops);
7908
2b6080f2
SR
7909 trace_create_file("trace_options", 0644, d_tracer,
7910 tr, &tracing_iter_fops);
7911
7912 trace_create_file("trace", 0644, d_tracer,
6484c71c 7913 tr, &tracing_fops);
2b6080f2
SR
7914
7915 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 7916 tr, &tracing_pipe_fops);
2b6080f2
SR
7917
7918 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 7919 tr, &tracing_entries_fops);
2b6080f2
SR
7920
7921 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7922 tr, &tracing_total_entries_fops);
7923
238ae93d 7924 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
7925 tr, &tracing_free_buffer_fops);
7926
7927 trace_create_file("trace_marker", 0220, d_tracer,
7928 tr, &tracing_mark_fops);
7929
fa32e855
SR
7930 trace_create_file("trace_marker_raw", 0220, d_tracer,
7931 tr, &tracing_mark_raw_fops);
7932
2b6080f2
SR
7933 trace_create_file("trace_clock", 0644, d_tracer, tr,
7934 &trace_clock_fops);
7935
7936 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 7937 tr, &rb_simple_fops);
ce9bae55 7938
2c1ea60b
TZ
7939 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
7940 &trace_time_stamp_mode_fops);
7941
16270145
SRRH
7942 create_trace_options_dir(tr);
7943
f971cc9a 7944#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6d9b3fa5
SRRH
7945 trace_create_file("tracing_max_latency", 0644, d_tracer,
7946 &tr->max_latency, &tracing_max_lat_fops);
7947#endif
7948
591dffda
SRRH
7949 if (ftrace_create_function_files(tr, d_tracer))
7950 WARN(1, "Could not allocate function filter files");
7951
ce9bae55
SRRH
7952#ifdef CONFIG_TRACER_SNAPSHOT
7953 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 7954 tr, &snapshot_fops);
ce9bae55 7955#endif
121aaee7
SRRH
7956
7957 for_each_tracing_cpu(cpu)
8434dc93 7958 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 7959
345ddcc8 7960 ftrace_init_tracefs(tr, d_tracer);
2b6080f2
SR
7961}
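/*
 * Rough sketch of what the calls above create, for the top level array
 * and for every instance alike: available_tracers, current_tracer,
 * trace, trace_pipe, trace_options, trace_marker, trace_clock,
 * tracing_on, buffer_size_kb, buffer_total_size_kb, free_buffer, the
 * options/ and per_cpu/ directories, plus snapshot and
 * tracing_max_latency when the corresponding config options are set.
 */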
7962
93faccbb 7963static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
f76180bc
SRRH
7964{
7965 struct vfsmount *mnt;
7966 struct file_system_type *type;
7967
7968 /*
7969 * To maintain backward compatibility for tools that mount
7970 * debugfs to get to the tracing facility, tracefs is automatically
7971 * mounted to the debugfs/tracing directory.
7972 */
7973 type = get_fs_type("tracefs");
7974 if (!type)
7975 return NULL;
93faccbb 7976 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
f76180bc
SRRH
7977 put_filesystem(type);
7978 if (IS_ERR(mnt))
7979 return NULL;
7980 mntget(mnt);
7981
7982 return mnt;
7983}
7984
7eeafbca
SRRH
7985/**
7986 * tracing_init_dentry - initialize top level trace array
7987 *
7988 * This is called when creating files or directories in the tracing
7989 * directory. It is called via fs_initcall() by any of the boot up code
7990 * and expects to return the dentry of the top level tracing directory.
7991 */
7992struct dentry *tracing_init_dentry(void)
7993{
7994 struct trace_array *tr = &global_trace;
7995
f76180bc 7996 /* The top level trace array uses NULL as parent */
7eeafbca 7997 if (tr->dir)
f76180bc 7998 return NULL;
7eeafbca 7999
8b129199
JW
8000 if (WARN_ON(!tracefs_initialized()) ||
8001 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8002 WARN_ON(!debugfs_initialized())))
7eeafbca
SRRH
8003 return ERR_PTR(-ENODEV);
8004
f76180bc
SRRH
8005 /*
8006 * As there may still be users that expect the tracing
8007 * files to exist in debugfs/tracing, we must automount
8008 * the tracefs file system there, so older tools still
8009 * work with the newer kernel.
8010 */
8011 tr->dir = debugfs_create_automount("tracing", NULL,
8012 trace_automount, NULL);
7eeafbca
SRRH
8013 if (!tr->dir) {
8014 pr_warn_once("Could not create debugfs directory 'tracing'\n");
8015 return ERR_PTR(-ENOMEM);
8016 }
8017
8434dc93 8018 return NULL;
7eeafbca
SRRH
8019}
8020
00f4b652
JL
8021extern struct trace_eval_map *__start_ftrace_eval_maps[];
8022extern struct trace_eval_map *__stop_ftrace_eval_maps[];
0c564a53 8023
5f60b351 8024static void __init trace_eval_init(void)
0c564a53 8025{
3673b8e4
SRRH
8026 int len;
8027
02fd7f68 8028 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
f57a4143 8029 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
3673b8e4
SRRH
8030}
8031
8032#ifdef CONFIG_MODULES
f57a4143 8033static void trace_module_add_evals(struct module *mod)
3673b8e4 8034{
99be647c 8035 if (!mod->num_trace_evals)
3673b8e4
SRRH
8036 return;
8037
8038 /*
8039 * Modules with bad taint do not have events created, do
8040 * not bother with enums either.
8041 */
8042 if (trace_module_has_bad_taint(mod))
8043 return;
8044
f57a4143 8045 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
3673b8e4
SRRH
8046}
8047
681bec03 8048#ifdef CONFIG_TRACE_EVAL_MAP_FILE
f57a4143 8049static void trace_module_remove_evals(struct module *mod)
9828413d 8050{
23bf8cb8
JL
8051 union trace_eval_map_item *map;
8052 union trace_eval_map_item **last = &trace_eval_maps;
9828413d 8053
99be647c 8054 if (!mod->num_trace_evals)
9828413d
SRRH
8055 return;
8056
1793ed93 8057 mutex_lock(&trace_eval_mutex);
9828413d 8058
23bf8cb8 8059 map = trace_eval_maps;
9828413d
SRRH
8060
8061 while (map) {
8062 if (map->head.mod == mod)
8063 break;
5f60b351 8064 map = trace_eval_jmp_to_tail(map);
9828413d
SRRH
8065 last = &map->tail.next;
8066 map = map->tail.next;
8067 }
8068 if (!map)
8069 goto out;
8070
5f60b351 8071 *last = trace_eval_jmp_to_tail(map)->tail.next;
9828413d
SRRH
8072 kfree(map);
8073 out:
1793ed93 8074 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
8075}
8076#else
f57a4143 8077static inline void trace_module_remove_evals(struct module *mod) { }
681bec03 8078#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 8079
3673b8e4
SRRH
8080static int trace_module_notify(struct notifier_block *self,
8081 unsigned long val, void *data)
8082{
8083 struct module *mod = data;
8084
8085 switch (val) {
8086 case MODULE_STATE_COMING:
f57a4143 8087 trace_module_add_evals(mod);
3673b8e4 8088 break;
9828413d 8089 case MODULE_STATE_GOING:
f57a4143 8090 trace_module_remove_evals(mod);
9828413d 8091 break;
3673b8e4
SRRH
8092 }
8093
8094 return 0;
0c564a53
SRRH
8095}
8096
3673b8e4
SRRH
8097static struct notifier_block trace_module_nb = {
8098 .notifier_call = trace_module_notify,
8099 .priority = 0,
8100};
9828413d 8101#endif /* CONFIG_MODULES */
3673b8e4 8102
8434dc93 8103static __init int tracer_init_tracefs(void)
bc0c38d1
SR
8104{
8105 struct dentry *d_tracer;
bc0c38d1 8106
7e53bd42
LJ
8107 trace_access_lock_init();
8108
bc0c38d1 8109 d_tracer = tracing_init_dentry();
14a5ae40 8110 if (IS_ERR(d_tracer))
ed6f1c99 8111 return 0;
bc0c38d1 8112
8434dc93 8113 init_tracer_tracefs(&global_trace, d_tracer);
501c2375 8114 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
bc0c38d1 8115
5452af66 8116 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 8117 &global_trace, &tracing_thresh_fops);
a8259075 8118
339ae5d3 8119 trace_create_file("README", 0444, d_tracer,
5452af66
FW
8120 NULL, &tracing_readme_fops);
8121
69abe6a5
AP
8122 trace_create_file("saved_cmdlines", 0444, d_tracer,
8123 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 8124
939c7a4f
YY
8125 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8126 NULL, &tracing_saved_cmdlines_size_fops);
8127
99c621d7
MS
8128 trace_create_file("saved_tgids", 0444, d_tracer,
8129 NULL, &tracing_saved_tgids_fops);
8130
5f60b351 8131 trace_eval_init();
0c564a53 8132
f57a4143 8133 trace_create_eval_file(d_tracer);
9828413d 8134
3673b8e4
SRRH
8135#ifdef CONFIG_MODULES
8136 register_module_notifier(&trace_module_nb);
8137#endif
8138
bc0c38d1 8139#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
8140 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8141 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 8142#endif
b04cc6b1 8143
277ba044 8144 create_trace_instances(d_tracer);
5452af66 8145
37aea98b 8146 update_tracer_options(&global_trace);
09d23a1d 8147
b5ad384e 8148 return 0;
bc0c38d1
SR
8149}
8150
3f5a54e3
SR
8151static int trace_panic_handler(struct notifier_block *this,
8152 unsigned long event, void *unused)
8153{
944ac425 8154 if (ftrace_dump_on_oops)
cecbca96 8155 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
8156 return NOTIFY_OK;
8157}
8158
8159static struct notifier_block trace_panic_notifier = {
8160 .notifier_call = trace_panic_handler,
8161 .next = NULL,
8162 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8163};
8164
8165static int trace_die_handler(struct notifier_block *self,
8166 unsigned long val,
8167 void *data)
8168{
8169 switch (val) {
8170 case DIE_OOPS:
944ac425 8171 if (ftrace_dump_on_oops)
cecbca96 8172 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
8173 break;
8174 default:
8175 break;
8176 }
8177 return NOTIFY_OK;
8178}
8179
8180static struct notifier_block trace_die_notifier = {
8181 .notifier_call = trace_die_handler,
8182 .priority = 200
8183};
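/*
 * Usage sketch: these notifiers only act when ftrace_dump_on_oops is
 * set, either on the command line ("ftrace_dump_on_oops" or
 * "ftrace_dump_on_oops=orig_cpu") or at runtime via
 *   echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 * after which a panic or oops triggers ftrace_dump() below.
 */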
8184
8185/*
8186 * printk is limited to a max of 1024, and we really don't need it that big.
8187 * Nothing should be printing 1000 characters anyway.
8188 */
8189#define TRACE_MAX_PRINT 1000
8190
8191/*
8192 * Define here KERN_TRACE so that we have one place to modify
8193 * it if we decide to change what log level the ftrace dump
8194 * should be at.
8195 */
428aee14 8196#define KERN_TRACE KERN_EMERG
3f5a54e3 8197
955b61e5 8198void
3f5a54e3
SR
8199trace_printk_seq(struct trace_seq *s)
8200{
8201 /* Probably should print a warning here. */
3a161d99
SRRH
8202 if (s->seq.len >= TRACE_MAX_PRINT)
8203 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 8204
820b75f6
SRRH
8205 /*
8206 * More paranoid code. Although the buffer size is set to
8207 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8208 * an extra layer of protection.
8209 */
8210 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8211 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
8212
8213 /* should be zero terminated, but we are paranoid. */
3a161d99 8214 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
8215
8216 printk(KERN_TRACE "%s", s->buffer);
8217
f9520750 8218 trace_seq_init(s);
3f5a54e3
SR
8219}
8220
955b61e5
JW
8221void trace_init_global_iter(struct trace_iterator *iter)
8222{
8223 iter->tr = &global_trace;
2b6080f2 8224 iter->trace = iter->tr->current_trace;
ae3b5093 8225 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 8226 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
8227
8228 if (iter->trace && iter->trace->open)
8229 iter->trace->open(iter);
8230
8231 /* Annotate start of buffers if we had overruns */
8232 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8233 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8234
8235 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8236 if (trace_clocks[iter->tr->clock_id].in_ns)
8237 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
8238}
8239
7fe70b57 8240void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 8241{
3f5a54e3
SR
8242 /* use static because iter can be a bit big for the stack */
8243 static struct trace_iterator iter;
7fe70b57 8244 static atomic_t dump_running;
983f938a 8245 struct trace_array *tr = &global_trace;
cf586b61 8246 unsigned int old_userobj;
d769041f
SR
8247 unsigned long flags;
8248 int cnt = 0, cpu;
3f5a54e3 8249
7fe70b57
SRRH
8250 /* Only allow one dump user at a time. */
8251 if (atomic_inc_return(&dump_running) != 1) {
8252 atomic_dec(&dump_running);
8253 return;
8254 }
3f5a54e3 8255
7fe70b57
SRRH
8256 /*
8257 * Always turn off tracing when we dump.
8258 * We don't need to show trace output of what happens
8259 * between multiple crashes.
8260 *
8261 * If the user does a sysrq-z, then they can re-enable
8262 * tracing with echo 1 > tracing_on.
8263 */
0ee6b6cf 8264 tracing_off();
cf586b61 8265
7fe70b57 8266 local_irq_save(flags);
3f5a54e3 8267
38dbe0b1 8268 /* Simulate the iterator */
955b61e5
JW
8269 trace_init_global_iter(&iter);
8270
d769041f 8271 for_each_tracing_cpu(cpu) {
5e2d5ef8 8272 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
d769041f
SR
8273 }
8274
983f938a 8275 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
cf586b61 8276
b54d3de9 8277 /* don't look at user memory in panic mode */
983f938a 8278 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
b54d3de9 8279
cecbca96
FW
8280 switch (oops_dump_mode) {
8281 case DUMP_ALL:
ae3b5093 8282 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
8283 break;
8284 case DUMP_ORIG:
8285 iter.cpu_file = raw_smp_processor_id();
8286 break;
8287 case DUMP_NONE:
8288 goto out_enable;
8289 default:
8290 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 8291 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
8292 }
8293
8294 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 8295
7fe70b57
SRRH
8296 /* Did function tracer already get disabled? */
8297 if (ftrace_is_dead()) {
8298 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8299 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8300 }
8301
3f5a54e3
SR
8302 /*
8303 * We need to stop all tracing on all CPUs to read
8304 * the next buffer. This is a bit expensive, but is
8305 * not done often. We fill all we can read,
8306 * and then release the locks again.
8307 */
8308
3f5a54e3
SR
8309 while (!trace_empty(&iter)) {
8310
8311 if (!cnt)
8312 printk(KERN_TRACE "---------------------------------\n");
8313
8314 cnt++;
8315
8316 /* reset all but tr, trace, and overruns */
8317 memset(&iter.seq, 0,
8318 sizeof(struct trace_iterator) -
8319 offsetof(struct trace_iterator, seq));
8320 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8321 iter.pos = -1;
8322
955b61e5 8323 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
8324 int ret;
8325
8326 ret = print_trace_line(&iter);
8327 if (ret != TRACE_TYPE_NO_CONSUME)
8328 trace_consume(&iter);
3f5a54e3 8329 }
b892e5c8 8330 touch_nmi_watchdog();
3f5a54e3
SR
8331
8332 trace_printk_seq(&iter.seq);
8333 }
8334
8335 if (!cnt)
8336 printk(KERN_TRACE " (ftrace buffer empty)\n");
8337 else
8338 printk(KERN_TRACE "---------------------------------\n");
8339
cecbca96 8340 out_enable:
983f938a 8341 tr->trace_flags |= old_userobj;
cf586b61 8342
7fe70b57
SRRH
8343 for_each_tracing_cpu(cpu) {
8344 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 8345 }
7fe70b57 8346 atomic_dec(&dump_running);
cd891ae0 8347 local_irq_restore(flags);
3f5a54e3 8348}
a8eecf22 8349EXPORT_SYMBOL_GPL(ftrace_dump);
cf586b61 8350
7e465baa
TZ
8351int trace_run_command(const char *buf, int (*createfn)(int, char **))
8352{
8353 char **argv;
8354 int argc, ret;
8355
8356 argc = 0;
8357 ret = 0;
8358 argv = argv_split(GFP_KERNEL, buf, &argc);
8359 if (!argv)
8360 return -ENOMEM;
8361
8362 if (argc)
8363 ret = createfn(argc, argv);
8364
8365 argv_free(argv);
8366
8367 return ret;
8368}
8369
8370#define WRITE_BUFSIZE 4096
8371
8372ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8373 size_t count, loff_t *ppos,
8374 int (*createfn)(int, char **))
8375{
8376 char *kbuf, *buf, *tmp;
8377 int ret = 0;
8378 size_t done = 0;
8379 size_t size;
8380
8381 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8382 if (!kbuf)
8383 return -ENOMEM;
8384
8385 while (done < count) {
8386 size = count - done;
8387
8388 if (size >= WRITE_BUFSIZE)
8389 size = WRITE_BUFSIZE - 1;
8390
8391 if (copy_from_user(kbuf, buffer + done, size)) {
8392 ret = -EFAULT;
8393 goto out;
8394 }
8395 kbuf[size] = '\0';
8396 buf = kbuf;
8397 do {
8398 tmp = strchr(buf, '\n');
8399 if (tmp) {
8400 *tmp = '\0';
8401 size = tmp - buf + 1;
8402 } else {
8403 size = strlen(buf);
8404 if (done + size < count) {
8405 if (buf != kbuf)
8406 break;
8407 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
8408 pr_warn("Line length is too long: Should be less than %d\n",
8409 WRITE_BUFSIZE - 2);
8410 ret = -EINVAL;
8411 goto out;
8412 }
8413 }
8414 done += size;
8415
8416 /* Remove comments */
8417 tmp = strchr(buf, '#');
8418
8419 if (tmp)
8420 *tmp = '\0';
8421
8422 ret = trace_run_command(buf, createfn);
8423 if (ret)
8424 goto out;
8425 buf += size;
8426
8427 } while (done < count);
8428 }
8429 ret = done;
8430
8431out:
8432 kfree(kbuf);
8433
8434 return ret;
8435}
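/*
 * Usage sketch (createfn here is whatever the caller supplies): a writer
 * such as the kprobe_events interface feeds user input through
 * trace_parse_run_command(), so that
 *   echo 'p:myprobe do_sys_open' > kprobe_events
 * arrives at its createfn as argv = { "p:myprobe", "do_sys_open" },
 * one line at a time, with '#' comments already stripped.
 */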
8436
3928a8a2 8437__init static int tracer_alloc_buffers(void)
bc0c38d1 8438{
73c5162a 8439 int ring_buf_size;
9e01c1b7 8440 int ret = -ENOMEM;
4c11d7ae 8441
b5e87c05
SRRH
8442 /*
8443 * Make sure we don't accidentally add more trace options
8444 * than we have bits for.
8445 */
9a38a885 8446 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
b5e87c05 8447
9e01c1b7
RR
8448 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8449 goto out;
8450
ccfe9e42 8451 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 8452 goto out_free_buffer_mask;
4c11d7ae 8453
07d777fe
SR
8454 /* Only allocate trace_printk buffers if a trace_printk exists */
8455 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 8456 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
8457 trace_printk_init_buffers();
8458
73c5162a
SR
8459 /* To save memory, keep the ring buffer size to its minimum */
8460 if (ring_buffer_expanded)
8461 ring_buf_size = trace_buf_size;
8462 else
8463 ring_buf_size = 1;
8464
9e01c1b7 8465 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 8466 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 8467
2b6080f2
SR
8468 raw_spin_lock_init(&global_trace.start_lock);
8469
b32614c0
SAS
8470 /*
8471 * The prepare callbacks allocate some memory for the ring buffer. We
8472 * don't free the buffer if the CPU goes down. If we were to free
8473 * the buffer, then the user would lose any trace that was in the
8474 * buffer. The memory will be removed once the "instance" is removed.
8475 */
8476 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8477 "trace/RB:preapre", trace_rb_cpu_prepare,
8478 NULL);
8479 if (ret < 0)
8480 goto out_free_cpumask;
2c4a33ab 8481 /* Used for event triggers */
147d88e0 8482 ret = -ENOMEM;
2c4a33ab
SRRH
8483 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8484 if (!temp_buffer)
b32614c0 8485 goto out_rm_hp_state;
2c4a33ab 8486
939c7a4f
YY
8487 if (trace_create_savedcmd() < 0)
8488 goto out_free_temp_buffer;
8489
9e01c1b7 8490 /* TODO: make the number of buffers hot pluggable with CPUs */
737223fb 8491 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
8492 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8493 WARN_ON(1);
939c7a4f 8494 goto out_free_savedcmd;
4c11d7ae 8495 }
a7603ff4 8496
499e5470
SR
8497 if (global_trace.buffer_disabled)
8498 tracing_off();
4c11d7ae 8499
e1e232ca
SR
8500 if (trace_boot_clock) {
8501 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8502 if (ret < 0)
a395d6a7
JP
8503 pr_warn("Trace clock %s not defined, going back to default\n",
8504 trace_boot_clock);
e1e232ca
SR
8505 }
8506
ca164318
SRRH
8507 /*
8508 * register_tracer() might reference current_trace, so it
8509 * needs to be set before we register anything. This is
8510 * just a bootstrap of current_trace anyway.
8511 */
2b6080f2
SR
8512 global_trace.current_trace = &nop_trace;
8513
0b9b12c1
SRRH
8514 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8515
4104d326
SRRH
8516 ftrace_init_global_array_ops(&global_trace);
8517
9a38a885
SRRH
8518 init_trace_flags_index(&global_trace);
8519
ca164318
SRRH
8520 register_tracer(&nop_trace);
8521
dbeafd0d
SRV
8522 /* Function tracing may start here (via kernel command line) */
8523 init_function_trace();
8524
60a11774
SR
8525 /* All seems OK, enable tracing */
8526 tracing_disabled = 0;
3928a8a2 8527
3f5a54e3
SR
8528 atomic_notifier_chain_register(&panic_notifier_list,
8529 &trace_panic_notifier);
8530
8531 register_die_notifier(&trace_die_notifier);
2fc1dfbe 8532
ae63b31e
SR
8533 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8534
8535 INIT_LIST_HEAD(&global_trace.systems);
8536 INIT_LIST_HEAD(&global_trace.events);
067fe038 8537 INIT_LIST_HEAD(&global_trace.hist_vars);
ae63b31e
SR
8538 list_add(&global_trace.list, &ftrace_trace_arrays);
8539
a4d1e688 8540 apply_trace_boot_options();
7bcfaf54 8541
77fd5c15
SRRH
8542 register_snapshot_cmd();
8543
2fc1dfbe 8544 return 0;
3f5a54e3 8545
939c7a4f
YY
8546out_free_savedcmd:
8547 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
8548out_free_temp_buffer:
8549 ring_buffer_free(temp_buffer);
b32614c0
SAS
8550out_rm_hp_state:
8551 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9e01c1b7 8552out_free_cpumask:
ccfe9e42 8553 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
8554out_free_buffer_mask:
8555 free_cpumask_var(tracing_buffer_mask);
8556out:
8557 return ret;
bc0c38d1 8558}
b2821ae6 8559
e725c731 8560void __init early_trace_init(void)
5f893b26 8561{
0daa2302
SRRH
8562 if (tracepoint_printk) {
8563 tracepoint_print_iter =
8564 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8565 if (WARN_ON(!tracepoint_print_iter))
8566 tracepoint_printk = 0;
42391745
SRRH
8567 else
8568 static_key_enable(&tracepoint_printk_key.key);
0daa2302 8569 }
5f893b26 8570 tracer_alloc_buffers();
e725c731
SRV
8571}
8572
8573void __init trace_init(void)
8574{
0c564a53 8575 trace_event_init();
5f893b26
SRRH
8576}
8577
b2821ae6
SR
8578__init static int clear_boot_tracer(void)
8579{
8580 /*
8581 * The name of the default boot-up tracer is kept in an init section.
8582 * This function is called at late_initcall time. If we did not
8583 * find the boot tracer, then clear the name out, to prevent a
8584 * later registration from accessing the buffer that is
8585 * about to be freed.
8586 */
8587 if (!default_bootup_tracer)
8588 return 0;
8589
8590 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8591 default_bootup_tracer);
8592 default_bootup_tracer = NULL;
8593
8594 return 0;
8595}
8596
8434dc93 8597fs_initcall(tracer_init_tracefs);
4bb0f0e7 8598late_initcall_sync(clear_boot_tracer);