/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};
78
8c1a49ae
SRRH
79static int
80dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
adf9f195
FW
81{
82 return 0;
83}
0f048701 84
7ffbd48d
SR
85/*
86 * To prevent the comm cache from being overwritten when no
87 * tracing is active, only save the comm when a trace event
88 * occurred.
89 */
d914ba37 90static DEFINE_PER_CPU(bool, trace_taskinfo_save);
7ffbd48d 91
0f048701
SR
92/*
93 * Kill all tracing for good (never come back).
94 * It is initialized to 1 but will turn to zero if the initialization
95 * of the tracer is successful. But that is the only place that sets
96 * this back to zero.
97 */
4fd27358 98static int tracing_disabled = 1;
0f048701 99
955b61e5 100cpumask_var_t __read_mostly tracing_buffer_mask;
ab46428c 101
944ac425
SR
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

de7edd31
SRRH
120/* When set, tracing will stop when a WARN*() is hit */
121int __disable_trace_on_warning;
122
681bec03
JL
123#ifdef CONFIG_TRACE_EVAL_MAP_FILE
124/* Map of enums to their values, for "eval_map" file */
23bf8cb8 125struct trace_eval_map_head {
9828413d
SRRH
126 struct module *mod;
127 unsigned long length;
128};
129
23bf8cb8 130union trace_eval_map_item;
9828413d 131
23bf8cb8 132struct trace_eval_map_tail {
9828413d
SRRH
	/*
	 * "end" is first and points to NULL, as it must be different
	 * from "mod" or "eval_string".
	 */
23bf8cb8 137 union trace_eval_map_item *next;
9828413d
SRRH
138 const char *end; /* points to NULL */
139};
140
1793ed93 141static DEFINE_MUTEX(trace_eval_mutex);
9828413d
SRRH
142
143/*
23bf8cb8 144 * The trace_eval_maps are saved in an array with two extra elements,
9828413d
SRRH
145 * one at the beginning, and one at the end. The beginning item contains
146 * the count of the saved maps (head.length), and the module they
147 * belong to if not built in (head.mod). The ending item contains a
681bec03 148 * pointer to the next array of saved eval_map items.
9828413d 149 */
23bf8cb8 150union trace_eval_map_item {
00f4b652 151 struct trace_eval_map map;
23bf8cb8
JL
152 struct trace_eval_map_head head;
153 struct trace_eval_map_tail tail;
9828413d
SRRH
154};
155
23bf8cb8 156static union trace_eval_map_item *trace_eval_maps;
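/*
 * Illustrative sketch (kept under #if 0, not built with this file): a walk
 * over saved eval maps using only the layout documented in the comment
 * above -- a head element carrying head.length, followed by that many map
 * entries, followed by a tail element whose ->tail.next chains to the next
 * saved array. The walk_saved_eval_maps() name is only for this example,
 * and the system/eval_string/eval_value members of struct trace_eval_map
 * are assumed from the definition in trace_events.h; the real iterator in
 * this file also takes trace_eval_mutex.
 */
#if 0
static void walk_saved_eval_maps(union trace_eval_map_item *start)
{
	union trace_eval_map_item *ptr = start;

	while (ptr) {
		unsigned long len = ptr->head.length;
		unsigned long i;

		/* index 0 is the head; the map entries follow it */
		for (i = 1; i <= len; i++)
			pr_info("%s: %s = %lu\n",
				ptr[i].map.system,
				ptr[i].map.eval_string,
				ptr[i].map.eval_value);

		/* index len + 1 is the tail, chaining to the next array */
		ptr = ptr[len + 1].tail.next;
	}
}
#endif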
681bec03 157#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 158
607e2ea1 159static int tracing_set_tracer(struct trace_array *tr, const char *buf);
b2821ae6 160
ee6c2c1b
LZ
161#define MAX_TRACER_SIZE 100
162static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
b2821ae6 163static char *default_bootup_tracer;
d9e54076 164
55034cd6
SRRH
165static bool allocate_snapshot;
166
1beee96b 167static int __init set_cmdline_ftrace(char *str)
d9e54076 168{
67012ab1 169 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
b2821ae6 170 default_bootup_tracer = bootup_tracer_buf;
73c5162a 171 /* We are using ftrace early, expand it */
55034cd6 172 ring_buffer_expanded = true;
d9e54076
PZ
173 return 1;
174}
1beee96b 175__setup("ftrace=", set_cmdline_ftrace);
d9e54076 176
944ac425
SR
177static int __init set_ftrace_dump_on_oops(char *str)
178{
cecbca96
FW
179 if (*str++ != '=' || !*str) {
180 ftrace_dump_on_oops = DUMP_ALL;
181 return 1;
182 }
183
184 if (!strcmp("orig_cpu", str)) {
185 ftrace_dump_on_oops = DUMP_ORIG;
186 return 1;
187 }
188
189 return 0;
944ac425
SR
190}
191__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
60a11774 192
de7edd31
SRRH
193static int __init stop_trace_on_warning(char *str)
194{
933ff9f2
LCG
195 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
196 __disable_trace_on_warning = 1;
de7edd31
SRRH
197 return 1;
198}
933ff9f2 199__setup("traceoff_on_warning", stop_trace_on_warning);
de7edd31 200
3209cff4 201static int __init boot_alloc_snapshot(char *str)
55034cd6
SRRH
202{
203 allocate_snapshot = true;
204 /* We also need the main ring buffer expanded */
205 ring_buffer_expanded = true;
206 return 1;
207}
3209cff4 208__setup("alloc_snapshot", boot_alloc_snapshot);
55034cd6 209
7bcfaf54
SR
210
211static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
7bcfaf54
SR
212
213static int __init set_trace_boot_options(char *str)
214{
67012ab1 215 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
7bcfaf54
SR
216 return 0;
217}
218__setup("trace_options=", set_trace_boot_options);
219
e1e232ca
SR
220static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
221static char *trace_boot_clock __initdata;
222
223static int __init set_trace_boot_clock(char *str)
224{
225 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
226 trace_boot_clock = trace_boot_clock_buf;
227 return 0;
228}
229__setup("trace_clock=", set_trace_boot_clock);
230
0daa2302
SRRH
231static int __init set_tracepoint_printk(char *str)
232{
233 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
234 tracepoint_printk = 1;
235 return 1;
236}
237__setup("tp_printk", set_tracepoint_printk);
de7edd31 238
a5a1d1c2 239unsigned long long ns2usecs(u64 nsec)
bc0c38d1
SR
240{
241 nsec += 500;
242 do_div(nsec, 1000);
243 return nsec;
244}
245
983f938a
SRRH
246/* trace_flags holds trace_options default values */
247#define TRACE_DEFAULT_FLAGS \
248 (FUNCTION_DEFAULT_FLAGS | \
249 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
250 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
251 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
252 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
253
16270145
SRRH
254/* trace_options that are only supported by global_trace */
255#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
256 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
257
20550622
SRRH
258/* trace_flags that are default zero for instances */
259#define ZEROED_TRACE_FLAGS \
1e10486f 260 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
16270145 261
4fcdae83 262/*
67d04bb2
JF
263 * The global_trace is the descriptor that holds the top-level tracing
264 * buffers for the live tracing.
4fcdae83 265 */
983f938a
SRRH
266static struct trace_array global_trace = {
267 .trace_flags = TRACE_DEFAULT_FLAGS,
268};
bc0c38d1 269
ae63b31e 270LIST_HEAD(ftrace_trace_arrays);
bc0c38d1 271
ff451961
SRRH
272int trace_array_get(struct trace_array *this_tr)
273{
274 struct trace_array *tr;
275 int ret = -ENODEV;
276
277 mutex_lock(&trace_types_lock);
278 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
279 if (tr == this_tr) {
280 tr->ref++;
281 ret = 0;
282 break;
283 }
284 }
285 mutex_unlock(&trace_types_lock);
286
287 return ret;
288}
289
290static void __trace_array_put(struct trace_array *this_tr)
291{
292 WARN_ON(!this_tr->ref);
293 this_tr->ref--;
294}
295
296void trace_array_put(struct trace_array *this_tr)
297{
298 mutex_lock(&trace_types_lock);
299 __trace_array_put(this_tr);
300 mutex_unlock(&trace_types_lock);
301}
302
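/*
 * Illustrative sketch (kept under #if 0, not built with this file): the
 * get/put pairing expected by the two helpers above when code outside this
 * file wants to keep a trace_array alive while using it.
 * sketch_with_instance() and sketch_use_instance() are placeholder names
 * for the example only.
 */
#if 0
static int sketch_with_instance(struct trace_array *tr)
{
	int ret;

	ret = trace_array_get(tr);
	if (ret < 0)
		return ret;	/* the instance has gone away */

	sketch_use_instance(tr);

	trace_array_put(tr);
	return 0;
}
#endif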
2425bcb9 303int call_filter_check_discard(struct trace_event_call *call, void *rec,
f306cc82
TZ
304 struct ring_buffer *buffer,
305 struct ring_buffer_event *event)
306{
307 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
308 !filter_match_preds(call->filter, rec)) {
0fc1b09f 309 __trace_event_discard_commit(buffer, event);
f306cc82
TZ
310 return 1;
311 }
312
313 return 0;
eb02ce01
TZ
314}
315
76c813e2
SRRH
316void trace_free_pid_list(struct trace_pid_list *pid_list)
317{
318 vfree(pid_list->pids);
319 kfree(pid_list);
320}
321
d8275c45
SR
322/**
323 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
324 * @filtered_pids: The list of pids to check
325 * @search_pid: The PID to find in @filtered_pids
326 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
328 */
329bool
330trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
331{
332 /*
333 * If pid_max changed after filtered_pids was created, we
334 * by default ignore all pids greater than the previous pid_max.
335 */
336 if (search_pid >= filtered_pids->pid_max)
337 return false;
338
339 return test_bit(search_pid, filtered_pids->pids);
340}
341
342/**
343 * trace_ignore_this_task - should a task be ignored for tracing
344 * @filtered_pids: The list of pids to check
345 * @task: The task that should be ignored if not filtered
346 *
347 * Checks if @task should be traced or not from @filtered_pids.
348 * Returns true if @task should *NOT* be traced.
349 * Returns false if @task should be traced.
350 */
351bool
352trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
353{
354 /*
355 * Return false, because if filtered_pids does not exist,
356 * all pids are good to trace.
357 */
358 if (!filtered_pids)
359 return false;
360
361 return !trace_find_filtered_pid(filtered_pids, task->pid);
362}
363
364/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
366 * @pid_list: The list to modify
367 * @self: The current task for fork or NULL for exit
368 * @task: The task to add or remove
369 *
370 * If adding a task, if @self is defined, the task is only added if @self
371 * is also included in @pid_list. This happens on fork and tasks should
372 * only be added when the parent is listed. If @self is NULL, then the
373 * @task pid will be removed from the list, which would happen on exit
374 * of a task.
375 */
376void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
377 struct task_struct *self,
378 struct task_struct *task)
379{
380 if (!pid_list)
381 return;
382
383 /* For forks, we only add if the forking task is listed */
384 if (self) {
385 if (!trace_find_filtered_pid(pid_list, self->pid))
386 return;
387 }
388
389 /* Sorry, but we don't support pid_max changing after setting */
390 if (task->pid >= pid_list->pid_max)
391 return;
392
393 /* "self" is set for forks, and NULL for exits */
394 if (self)
395 set_bit(task->pid, pid_list->pids);
396 else
397 clear_bit(task->pid, pid_list->pids);
398}
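/*
 * Illustrative sketch (kept under #if 0, not built with this file): how a
 * fork/exit hook might keep a pid_list in sync using the helper above. The
 * sketch_sched_process_*() names and the way @data carries the pid_list are
 * assumptions for the example; only the @self/@task convention (parent
 * passed on fork, NULL passed on exit) is taken from the comment above.
 */
#if 0
static void sketch_sched_process_fork(void *data,
				      struct task_struct *parent,
				      struct task_struct *child)
{
	struct trace_pid_list *pid_list = data;

	/* the child is added only if the parent is already filtered */
	trace_filter_add_remove_task(pid_list, parent, child);
}

static void sketch_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list = data;

	/* @self == NULL means "remove @task from the list" */
	trace_filter_add_remove_task(pid_list, NULL, task);
}
#endif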
399
5cc8976b
SRRH
400/**
401 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
402 * @pid_list: The pid list to show
403 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
404 * @pos: The position of the file
405 *
406 * This is used by the seq_file "next" operation to iterate the pids
407 * listed in a trace_pid_list structure.
408 *
409 * Returns the pid+1 as we want to display pid of zero, but NULL would
410 * stop the iteration.
411 */
412void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
413{
414 unsigned long pid = (unsigned long)v;
415
416 (*pos)++;
417
	/* pid already is +1 of the actual previous bit */
419 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
420
421 /* Return pid + 1 to allow zero to be represented */
422 if (pid < pid_list->pid_max)
423 return (void *)(pid + 1);
424
425 return NULL;
426}
427
428/**
429 * trace_pid_start - Used for seq_file to start reading pid lists
430 * @pid_list: The pid list to show
431 * @pos: The position of the file
432 *
433 * This is used by seq_file "start" operation to start the iteration
434 * of listing pids.
435 *
436 * Returns the pid+1 as we want to display pid of zero, but NULL would
437 * stop the iteration.
438 */
439void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
440{
441 unsigned long pid;
442 loff_t l = 0;
443
444 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
445 if (pid >= pid_list->pid_max)
446 return NULL;
447
448 /* Return pid + 1 so that zero can be the exit value */
449 for (pid++; pid && l < *pos;
450 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
451 ;
452 return (void *)pid;
453}
454
455/**
456 * trace_pid_show - show the current pid in seq_file processing
457 * @m: The seq_file structure to write into
458 * @v: A void pointer of the pid (+1) value to display
459 *
460 * Can be directly used by seq_file operations to display the current
461 * pid value.
462 */
463int trace_pid_show(struct seq_file *m, void *v)
464{
465 unsigned long pid = (unsigned long)v - 1;
466
467 seq_printf(m, "%lu\n", pid);
468 return 0;
469}
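/*
 * Illustrative sketch (kept under #if 0, not built with this file): wiring
 * the three helpers above into a seq_operations table. The +1 convention
 * lets pid 0 be displayed while NULL still terminates the iteration. The
 * sketch_pids_*() names and the use of m->private to carry the pid_list are
 * assumptions for the example.
 */
#if 0
static void *sketch_pids_start(struct seq_file *m, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;

	return trace_pid_start(pid_list, pos);
}

static void *sketch_pids_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;

	return trace_pid_next(pid_list, v, pos);
}

static void sketch_pids_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations sketch_pids_seq_ops = {
	.start	= sketch_pids_start,
	.next	= sketch_pids_next,
	.stop	= sketch_pids_stop,
	.show	= trace_pid_show,
};
#endif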
470
76c813e2
SRRH
471/* 128 should be much more than enough */
472#define PID_BUF_SIZE 127
473
474int trace_pid_write(struct trace_pid_list *filtered_pids,
475 struct trace_pid_list **new_pid_list,
476 const char __user *ubuf, size_t cnt)
477{
478 struct trace_pid_list *pid_list;
479 struct trace_parser parser;
480 unsigned long val;
481 int nr_pids = 0;
482 ssize_t read = 0;
483 ssize_t ret = 0;
484 loff_t pos;
485 pid_t pid;
486
487 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
488 return -ENOMEM;
489
	/*
	 * Always recreate a new array: the write is an all-or-nothing
	 * operation. If the operation fails, the current list is
	 * left unmodified.
	 */
496 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
497 if (!pid_list)
498 return -ENOMEM;
499
500 pid_list->pid_max = READ_ONCE(pid_max);
501
502 /* Only truncating will shrink pid_max */
503 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
504 pid_list->pid_max = filtered_pids->pid_max;
505
506 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
507 if (!pid_list->pids) {
508 kfree(pid_list);
509 return -ENOMEM;
510 }
511
512 if (filtered_pids) {
513 /* copy the current bits to the new max */
67f20b08
WY
514 for_each_set_bit(pid, filtered_pids->pids,
515 filtered_pids->pid_max) {
76c813e2 516 set_bit(pid, pid_list->pids);
76c813e2
SRRH
517 nr_pids++;
518 }
519 }
520
521 while (cnt > 0) {
522
523 pos = 0;
524
525 ret = trace_get_user(&parser, ubuf, cnt, &pos);
526 if (ret < 0 || !trace_parser_loaded(&parser))
527 break;
528
529 read += ret;
530 ubuf += ret;
531 cnt -= ret;
532
533 parser.buffer[parser.idx] = 0;
534
535 ret = -EINVAL;
536 if (kstrtoul(parser.buffer, 0, &val))
537 break;
538 if (val >= pid_list->pid_max)
539 break;
540
541 pid = (pid_t)val;
542
543 set_bit(pid, pid_list->pids);
544 nr_pids++;
545
546 trace_parser_clear(&parser);
547 ret = 0;
548 }
549 trace_parser_put(&parser);
550
551 if (ret < 0) {
552 trace_free_pid_list(pid_list);
553 return ret;
554 }
555
556 if (!nr_pids) {
557 /* Cleared the list of pids */
558 trace_free_pid_list(pid_list);
559 read = ret;
560 pid_list = NULL;
561 }
562
563 *new_pid_list = pid_list;
564
565 return read;
566}
567
a5a1d1c2 568static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
37886f6a
SR
569{
570 u64 ts;
571
572 /* Early boot up does not have a buffer yet */
9457158b 573 if (!buf->buffer)
37886f6a
SR
574 return trace_clock_local();
575
9457158b
AL
576 ts = ring_buffer_time_stamp(buf->buffer, cpu);
577 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
37886f6a
SR
578
579 return ts;
580}
bc0c38d1 581
a5a1d1c2 582u64 ftrace_now(int cpu)
9457158b
AL
583{
584 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
585}
586
10246fa3
SRRH
587/**
588 * tracing_is_enabled - Show if global_trace has been disabled
589 *
590 * Shows if the global trace has been enabled or not. It uses the
591 * mirror flag "buffer_disabled" to be used in fast paths such as for
592 * the irqsoff tracer. But it may be inaccurate due to races. If you
593 * need to know the accurate state, use tracing_is_on() which is a little
594 * slower, but accurate.
595 */
9036990d
SR
596int tracing_is_enabled(void)
597{
10246fa3
SRRH
598 /*
599 * For quick access (irqsoff uses this in fast path), just
600 * return the mirror variable of the state of the ring buffer.
601 * It's a little racy, but we don't really care.
602 */
603 smp_rmb();
604 return !global_trace.buffer_disabled;
9036990d
SR
605}
606
4fcdae83 607/*
3928a8a2
SR
608 * trace_buf_size is the size in bytes that is allocated
609 * for a buffer. Note, the number of bytes is always rounded
610 * to page size.
3f5a54e3
SR
611 *
 * This number is purposely set to a low value of 16384.
 * If a dump on oops happens, it is much appreciated not to have
 * to wait for all that output. In any case, this is configurable
 * at both boot time and run time.
4fcdae83 616 */
3928a8a2 617#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
3f5a54e3 618
3928a8a2 619static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
bc0c38d1 620
4fcdae83 621/* trace_types holds a link list of available tracers. */
bc0c38d1 622static struct tracer *trace_types __read_mostly;
4fcdae83 623
4fcdae83
SR
624/*
625 * trace_types_lock is used to protect the trace_types list.
4fcdae83 626 */
a8227415 627DEFINE_MUTEX(trace_types_lock);
4fcdae83 628
7e53bd42
LJ
/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 * A) the page of the consumed events may become a normal page
 *    (not a reader page) in the ring buffer, and this page will be
 *    rewritten by the events producer.
 * B) the page of the consumed events may become a page for splice_read,
 *    and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */
650
651#ifdef CONFIG_SMP
652static DECLARE_RWSEM(all_cpu_access_lock);
653static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
654
655static inline void trace_access_lock(int cpu)
656{
ae3b5093 657 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
658 /* gain it for accessing the whole ring buffer. */
659 down_write(&all_cpu_access_lock);
660 } else {
661 /* gain it for accessing a cpu ring buffer. */
662
ae3b5093 663 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
7e53bd42
LJ
664 down_read(&all_cpu_access_lock);
665
666 /* Secondly block other access to this @cpu ring buffer. */
667 mutex_lock(&per_cpu(cpu_access_lock, cpu));
668 }
669}
670
671static inline void trace_access_unlock(int cpu)
672{
ae3b5093 673 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
674 up_write(&all_cpu_access_lock);
675 } else {
676 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
677 up_read(&all_cpu_access_lock);
678 }
679}
680
681static inline void trace_access_lock_init(void)
682{
683 int cpu;
684
685 for_each_possible_cpu(cpu)
686 mutex_init(&per_cpu(cpu_access_lock, cpu));
687}
688
689#else
690
691static DEFINE_MUTEX(access_lock);
692
693static inline void trace_access_lock(int cpu)
694{
695 (void)cpu;
696 mutex_lock(&access_lock);
697}
698
699static inline void trace_access_unlock(int cpu)
700{
701 (void)cpu;
702 mutex_unlock(&access_lock);
703}
704
705static inline void trace_access_lock_init(void)
706{
707}
708
709#endif
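/*
 * Illustrative sketch (kept under #if 0, not built with this file): the
 * intended pairing of the primitives above around a per-cpu consuming
 * read. sketch_consume_event() and the way the buffer is obtained are
 * assumptions for the example; only the lock/unlock pattern reflects how
 * this file uses trace_access_lock()/trace_access_unlock().
 */
#if 0
static struct ring_buffer_event *
sketch_consume_event(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_event *event;
	unsigned long lost_events;

	trace_access_lock(cpu);
	event = ring_buffer_consume(buffer, cpu, ts, &lost_events);
	trace_access_unlock(cpu);

	return event;
}
#endif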
710
d78a4614
SRRH
711#ifdef CONFIG_STACKTRACE
712static void __ftrace_trace_stack(struct ring_buffer *buffer,
713 unsigned long flags,
714 int skip, int pc, struct pt_regs *regs);
2d34f489
SRRH
715static inline void ftrace_trace_stack(struct trace_array *tr,
716 struct ring_buffer *buffer,
73dddbb5
SRRH
717 unsigned long flags,
718 int skip, int pc, struct pt_regs *regs);
ca475e83 719
d78a4614
SRRH
720#else
721static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
722 unsigned long flags,
723 int skip, int pc, struct pt_regs *regs)
724{
725}
2d34f489
SRRH
726static inline void ftrace_trace_stack(struct trace_array *tr,
727 struct ring_buffer *buffer,
73dddbb5
SRRH
728 unsigned long flags,
729 int skip, int pc, struct pt_regs *regs)
ca475e83
SRRH
730{
731}
732
d78a4614
SRRH
733#endif
734
3e9a8aad
SRRH
735static __always_inline void
736trace_event_setup(struct ring_buffer_event *event,
737 int type, unsigned long flags, int pc)
738{
739 struct trace_entry *ent = ring_buffer_event_data(event);
740
741 tracing_generic_entry_update(ent, flags, pc);
742 ent->type = type;
743}
744
745static __always_inline struct ring_buffer_event *
746__trace_buffer_lock_reserve(struct ring_buffer *buffer,
747 int type,
748 unsigned long len,
749 unsigned long flags, int pc)
750{
751 struct ring_buffer_event *event;
752
753 event = ring_buffer_lock_reserve(buffer, len);
754 if (event != NULL)
755 trace_event_setup(event, type, flags, pc);
756
757 return event;
758}
759
2290f2c5 760void tracer_tracing_on(struct trace_array *tr)
10246fa3
SRRH
761{
762 if (tr->trace_buffer.buffer)
763 ring_buffer_record_on(tr->trace_buffer.buffer);
764 /*
765 * This flag is looked at when buffers haven't been allocated
766 * yet, or by some tracers (like irqsoff), that just want to
767 * know if the ring buffer has been disabled, but it can handle
768 * races of where it gets disabled but we still do a record.
769 * As the check is in the fast path of the tracers, it is more
770 * important to be fast than accurate.
771 */
772 tr->buffer_disabled = 0;
773 /* Make the flag seen by readers */
774 smp_wmb();
775}
776
499e5470
SR
777/**
778 * tracing_on - enable tracing buffers
779 *
780 * This function enables tracing buffers that may have been
781 * disabled with tracing_off.
782 */
783void tracing_on(void)
784{
10246fa3 785 tracer_tracing_on(&global_trace);
499e5470
SR
786}
787EXPORT_SYMBOL_GPL(tracing_on);
788
52ffabe3
SRRH
789
790static __always_inline void
791__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
792{
d914ba37 793 __this_cpu_write(trace_taskinfo_save, true);
52ffabe3
SRRH
794
795 /* If this is the temp buffer, we need to commit fully */
796 if (this_cpu_read(trace_buffered_event) == event) {
797 /* Length is in event->array[0] */
798 ring_buffer_write(buffer, event->array[0], &event->array[1]);
799 /* Release the temp buffer */
800 this_cpu_dec(trace_buffered_event_cnt);
801 } else
802 ring_buffer_unlock_commit(buffer, event);
803}
804
09ae7234
SRRH
805/**
806 * __trace_puts - write a constant string into the trace buffer.
807 * @ip: The address of the caller
808 * @str: The constant string to write
809 * @size: The size of the string.
810 */
811int __trace_puts(unsigned long ip, const char *str, int size)
812{
813 struct ring_buffer_event *event;
814 struct ring_buffer *buffer;
815 struct print_entry *entry;
816 unsigned long irq_flags;
817 int alloc;
8abfb872
J
818 int pc;
819
983f938a 820 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
821 return 0;
822
8abfb872 823 pc = preempt_count();
09ae7234 824
3132e107
SRRH
825 if (unlikely(tracing_selftest_running || tracing_disabled))
826 return 0;
827
09ae7234
SRRH
828 alloc = sizeof(*entry) + size + 2; /* possible \n added */
829
830 local_save_flags(irq_flags);
831 buffer = global_trace.trace_buffer.buffer;
3e9a8aad
SRRH
832 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
833 irq_flags, pc);
09ae7234
SRRH
834 if (!event)
835 return 0;
836
837 entry = ring_buffer_event_data(event);
838 entry->ip = ip;
839
840 memcpy(&entry->buf, str, size);
841
842 /* Add a newline if necessary */
843 if (entry->buf[size - 1] != '\n') {
844 entry->buf[size] = '\n';
845 entry->buf[size + 1] = '\0';
846 } else
847 entry->buf[size] = '\0';
848
849 __buffer_unlock_commit(buffer, event);
2d34f489 850 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
851
852 return size;
853}
854EXPORT_SYMBOL_GPL(__trace_puts);
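/*
 * Illustrative sketch (kept under #if 0, not built with this file): what a
 * caller of the export above looks like. In-tree code normally goes through
 * the trace_puts() macro, which supplies _THIS_IP_ and the string length;
 * the direct call below just mirrors that expansion. sketch_mark_point()
 * and the message text are for the example only.
 */
#if 0
static void sketch_mark_point(void)
{
	static const char msg[] = "sketch: reached the interesting point\n";

	__trace_puts(_THIS_IP_, msg, strlen(msg));
}
#endif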
855
856/**
857 * __trace_bputs - write the pointer to a constant string into trace buffer
858 * @ip: The address of the caller
859 * @str: The constant string to write to the buffer to
860 */
861int __trace_bputs(unsigned long ip, const char *str)
862{
863 struct ring_buffer_event *event;
864 struct ring_buffer *buffer;
865 struct bputs_entry *entry;
866 unsigned long irq_flags;
867 int size = sizeof(struct bputs_entry);
8abfb872
J
868 int pc;
869
983f938a 870 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
871 return 0;
872
8abfb872 873 pc = preempt_count();
09ae7234 874
3132e107
SRRH
875 if (unlikely(tracing_selftest_running || tracing_disabled))
876 return 0;
877
09ae7234
SRRH
878 local_save_flags(irq_flags);
879 buffer = global_trace.trace_buffer.buffer;
3e9a8aad
SRRH
880 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
881 irq_flags, pc);
09ae7234
SRRH
882 if (!event)
883 return 0;
884
885 entry = ring_buffer_event_data(event);
886 entry->ip = ip;
887 entry->str = str;
888
889 __buffer_unlock_commit(buffer, event);
2d34f489 890 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
891
892 return 1;
893}
894EXPORT_SYMBOL_GPL(__trace_bputs);
895
ad909e21 896#ifdef CONFIG_TRACER_SNAPSHOT
cab50379 897static void tracing_snapshot_instance(struct trace_array *tr)
ad909e21 898{
ad909e21
SRRH
899 struct tracer *tracer = tr->current_trace;
900 unsigned long flags;
901
1b22e382
SRRH
902 if (in_nmi()) {
903 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
904 internal_trace_puts("*** snapshot is being ignored ***\n");
905 return;
906 }
907
ad909e21 908 if (!tr->allocated_snapshot) {
ca268da6
SRRH
909 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
910 internal_trace_puts("*** stopping trace here! ***\n");
ad909e21
SRRH
911 tracing_off();
912 return;
913 }
914
915 /* Note, snapshot can not be used when the tracer uses it */
916 if (tracer->use_max_tr) {
ca268da6
SRRH
917 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
918 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
ad909e21
SRRH
919 return;
920 }
921
922 local_irq_save(flags);
923 update_max_tr(tr, current, smp_processor_id());
924 local_irq_restore(flags);
925}
cab50379
SRV
926
927/**
 * tracing_snapshot - take a snapshot of the current buffer.
929 *
930 * This causes a swap between the snapshot buffer and the current live
931 * tracing buffer. You can use this to take snapshots of the live
932 * trace when some condition is triggered, but continue to trace.
933 *
934 * Note, make sure to allocate the snapshot with either
935 * a tracing_snapshot_alloc(), or by doing it manually
936 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
937 *
938 * If the snapshot buffer is not allocated, it will stop tracing.
939 * Basically making a permanent snapshot.
940 */
941void tracing_snapshot(void)
942{
943 struct trace_array *tr = &global_trace;
944
945 tracing_snapshot_instance(tr);
946}
1b22e382 947EXPORT_SYMBOL_GPL(tracing_snapshot);
ad909e21
SRRH
948
949static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
950 struct trace_buffer *size_buf, int cpu_id);
3209cff4
SRRH
951static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
952
953static int alloc_snapshot(struct trace_array *tr)
954{
955 int ret;
956
957 if (!tr->allocated_snapshot) {
958
959 /* allocate spare buffer */
960 ret = resize_buffer_duplicate_size(&tr->max_buffer,
961 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
962 if (ret < 0)
963 return ret;
964
965 tr->allocated_snapshot = true;
966 }
967
968 return 0;
969}
970
ad1438a0 971static void free_snapshot(struct trace_array *tr)
3209cff4
SRRH
972{
	/*
	 * We don't free the ring buffer. Instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
978 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
979 set_buffer_entries(&tr->max_buffer, 1);
980 tracing_reset_online_cpus(&tr->max_buffer);
981 tr->allocated_snapshot = false;
982}
ad909e21 983
93e31ffb
TZ
984/**
985 * tracing_alloc_snapshot - allocate snapshot buffer.
986 *
987 * This only allocates the snapshot buffer if it isn't already
988 * allocated - it doesn't also take a snapshot.
989 *
990 * This is meant to be used in cases where the snapshot buffer needs
991 * to be set up for events that can't sleep but need to be able to
992 * trigger a snapshot.
993 */
994int tracing_alloc_snapshot(void)
995{
996 struct trace_array *tr = &global_trace;
997 int ret;
998
999 ret = alloc_snapshot(tr);
1000 WARN_ON(ret < 0);
1001
1002 return ret;
1003}
1004EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1005
ad909e21
SRRH
1006/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
1010 * snapshot buffer if it isn't already allocated. Use this only
1011 * where it is safe to sleep, as the allocation may sleep.
1012 *
1013 * This causes a swap between the snapshot buffer and the current live
1014 * tracing buffer. You can use this to take snapshots of the live
1015 * trace when some condition is triggered, but continue to trace.
1016 */
1017void tracing_snapshot_alloc(void)
1018{
ad909e21
SRRH
1019 int ret;
1020
93e31ffb
TZ
1021 ret = tracing_alloc_snapshot();
1022 if (ret < 0)
3209cff4 1023 return;
ad909e21
SRRH
1024
1025 tracing_snapshot();
1026}
1b22e382 1027EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
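/*
 * Illustrative sketch (kept under #if 0, not built with this file): typical
 * use of the two exports above from other kernel code, following the
 * comments' advice to allocate from sleepable context and snapshot later.
 * The sketch_*() wrappers and the triggering condition are assumptions for
 * the example.
 */
#if 0
static int __init sketch_snapshot_init(void)
{
	/* May sleep: do the allocation once, from process context. */
	return tracing_alloc_snapshot();
}

static void sketch_on_condition(void)
{
	/* Safe in atomic context once the spare buffer exists. */
	tracing_snapshot();
}
#endif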
ad909e21
SRRH
1028#else
1029void tracing_snapshot(void)
1030{
1031 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1032}
1b22e382 1033EXPORT_SYMBOL_GPL(tracing_snapshot);
93e31ffb
TZ
1034int tracing_alloc_snapshot(void)
1035{
1036 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1037 return -ENODEV;
1038}
1039EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
ad909e21
SRRH
1040void tracing_snapshot_alloc(void)
1041{
1042 /* Give warning */
1043 tracing_snapshot();
1044}
1b22e382 1045EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
1046#endif /* CONFIG_TRACER_SNAPSHOT */
1047
2290f2c5 1048void tracer_tracing_off(struct trace_array *tr)
10246fa3
SRRH
1049{
1050 if (tr->trace_buffer.buffer)
1051 ring_buffer_record_off(tr->trace_buffer.buffer);
1052 /*
1053 * This flag is looked at when buffers haven't been allocated
1054 * yet, or by some tracers (like irqsoff), that just want to
1055 * know if the ring buffer has been disabled, but it can handle
1056 * races of where it gets disabled but we still do a record.
1057 * As the check is in the fast path of the tracers, it is more
1058 * important to be fast than accurate.
1059 */
1060 tr->buffer_disabled = 1;
1061 /* Make the flag seen by readers */
1062 smp_wmb();
1063}
1064
499e5470
SR
1065/**
1066 * tracing_off - turn off tracing buffers
1067 *
1068 * This function stops the tracing buffers from recording data.
1069 * It does not disable any overhead the tracers themselves may
1070 * be causing. This function simply causes all recording to
1071 * the ring buffers to fail.
1072 */
1073void tracing_off(void)
1074{
10246fa3 1075 tracer_tracing_off(&global_trace);
499e5470
SR
1076}
1077EXPORT_SYMBOL_GPL(tracing_off);
1078
de7edd31
SRRH
1079void disable_trace_on_warning(void)
1080{
1081 if (__disable_trace_on_warning)
1082 tracing_off();
1083}
1084
10246fa3
SRRH
1085/**
1086 * tracer_tracing_is_on - show real state of ring buffer enabled
1087 * @tr : the trace array to know if ring buffer is enabled
1088 *
1089 * Shows real state of the ring buffer if it is enabled or not.
1090 */
e7c15cd8 1091int tracer_tracing_is_on(struct trace_array *tr)
10246fa3
SRRH
1092{
1093 if (tr->trace_buffer.buffer)
1094 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1095 return !tr->buffer_disabled;
1096}
1097
499e5470
SR
1098/**
1099 * tracing_is_on - show state of ring buffers enabled
1100 */
1101int tracing_is_on(void)
1102{
10246fa3 1103 return tracer_tracing_is_on(&global_trace);
499e5470
SR
1104}
1105EXPORT_SYMBOL_GPL(tracing_is_on);
1106
3928a8a2 1107static int __init set_buf_size(char *str)
bc0c38d1 1108{
3928a8a2 1109 unsigned long buf_size;
c6caeeb1 1110
bc0c38d1
SR
1111 if (!str)
1112 return 0;
9d612bef 1113 buf_size = memparse(str, &str);
c6caeeb1 1114 /* nr_entries can not be zero */
9d612bef 1115 if (buf_size == 0)
c6caeeb1 1116 return 0;
3928a8a2 1117 trace_buf_size = buf_size;
bc0c38d1
SR
1118 return 1;
1119}
3928a8a2 1120__setup("trace_buf_size=", set_buf_size);
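/*
 * Illustrative sketch (kept under #if 0, not built with this file): what
 * memparse() above accepts for "trace_buf_size=". This is a minimal
 * user-space approximation handling only K/M/G suffixes, to show that e.g.
 * "trace_buf_size=10M" requests roughly 10 MiB; the real kernel helper
 * handles more suffixes and is not copied here.
 */
#if 0
#include <stdlib.h>

static unsigned long long sketch_memparse(const char *s)
{
	char *end;
	unsigned long long val = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': val <<= 10; /* fall through */
	case 'M': case 'm': val <<= 10; /* fall through */
	case 'K': case 'k': val <<= 10;
	default: break;
	}
	return val;
}
#endif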
bc0c38d1 1121
0e950173
TB
1122static int __init set_tracing_thresh(char *str)
1123{
87abb3b1 1124 unsigned long threshold;
0e950173
TB
1125 int ret;
1126
1127 if (!str)
1128 return 0;
bcd83ea6 1129 ret = kstrtoul(str, 0, &threshold);
0e950173
TB
1130 if (ret < 0)
1131 return 0;
87abb3b1 1132 tracing_thresh = threshold * 1000;
0e950173
TB
1133 return 1;
1134}
1135__setup("tracing_thresh=", set_tracing_thresh);
1136
57f50be1
SR
1137unsigned long nsecs_to_usecs(unsigned long nsecs)
1138{
1139 return nsecs / 1000;
1140}
1141
a3418a36
SRRH
1142/*
1143 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
f57a4143 1144 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
a3418a36 1145 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
f57a4143 1146 * of strings in the order that the evals (enum) were defined.
a3418a36
SRRH
1147 */
1148#undef C
1149#define C(a, b) b
1150
/* These must match the bit positions in trace_iterator_flags */
bc0c38d1 1152static const char *trace_options[] = {
a3418a36 1153 TRACE_FLAGS
bc0c38d1
SR
1154 NULL
1155};
1156
5079f326
Z
1157static struct {
1158 u64 (*func)(void);
1159 const char *name;
8be0709f 1160 int in_ns; /* is this clock in nanoseconds? */
5079f326 1161} trace_clocks[] = {
1b3e5c09
TG
1162 { trace_clock_local, "local", 1 },
1163 { trace_clock_global, "global", 1 },
1164 { trace_clock_counter, "counter", 0 },
e7fda6c4 1165 { trace_clock_jiffies, "uptime", 0 },
1b3e5c09
TG
1166 { trace_clock, "perf", 1 },
1167 { ktime_get_mono_fast_ns, "mono", 1 },
aabfa5f2 1168 { ktime_get_raw_fast_ns, "mono_raw", 1 },
80ec3552 1169 { ktime_get_boot_fast_ns, "boot", 1 },
8cbd9cc6 1170 ARCH_TRACE_CLOCKS
5079f326
Z
1171};
1172
b63f39ea 1173/*
1174 * trace_parser_get_init - gets the buffer for trace parser
1175 */
1176int trace_parser_get_init(struct trace_parser *parser, int size)
1177{
1178 memset(parser, 0, sizeof(*parser));
1179
1180 parser->buffer = kmalloc(size, GFP_KERNEL);
1181 if (!parser->buffer)
1182 return 1;
1183
1184 parser->size = size;
1185 return 0;
1186}
1187
1188/*
1189 * trace_parser_put - frees the buffer for trace parser
1190 */
1191void trace_parser_put(struct trace_parser *parser)
1192{
1193 kfree(parser->buffer);
0e684b65 1194 parser->buffer = NULL;
b63f39ea 1195}
1196
1197/*
1198 * trace_get_user - reads the user input string separated by space
1199 * (matched by isspace(ch))
1200 *
1201 * For each string found the 'struct trace_parser' is updated,
1202 * and the function returns.
1203 *
1204 * Returns number of bytes read.
1205 *
1206 * See kernel/trace/trace.h for 'struct trace_parser' details.
1207 */
1208int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1209 size_t cnt, loff_t *ppos)
1210{
1211 char ch;
1212 size_t read = 0;
1213 ssize_t ret;
1214
1215 if (!*ppos)
1216 trace_parser_clear(parser);
1217
1218 ret = get_user(ch, ubuf++);
1219 if (ret)
1220 goto out;
1221
1222 read++;
1223 cnt--;
1224
1225 /*
1226 * The parser is not finished with the last write,
1227 * continue reading the user input without skipping spaces.
1228 */
1229 if (!parser->cont) {
1230 /* skip white space */
1231 while (cnt && isspace(ch)) {
1232 ret = get_user(ch, ubuf++);
1233 if (ret)
1234 goto out;
1235 read++;
1236 cnt--;
1237 }
1238
1239 /* only spaces were written */
1240 if (isspace(ch)) {
1241 *ppos += read;
1242 ret = read;
1243 goto out;
1244 }
1245
1246 parser->idx = 0;
1247 }
1248
1249 /* read the non-space input */
1250 while (cnt && !isspace(ch)) {
3c235a33 1251 if (parser->idx < parser->size - 1)
b63f39ea 1252 parser->buffer[parser->idx++] = ch;
1253 else {
1254 ret = -EINVAL;
1255 goto out;
1256 }
1257 ret = get_user(ch, ubuf++);
1258 if (ret)
1259 goto out;
1260 read++;
1261 cnt--;
1262 }
1263
1264 /* We either got finished input or we have to wait for another call. */
1265 if (isspace(ch)) {
1266 parser->buffer[parser->idx] = 0;
1267 parser->cont = false;
057db848 1268 } else if (parser->idx < parser->size - 1) {
b63f39ea 1269 parser->cont = true;
1270 parser->buffer[parser->idx++] = ch;
057db848
SR
1271 } else {
1272 ret = -EINVAL;
1273 goto out;
b63f39ea 1274 }
1275
1276 *ppos += read;
1277 ret = read;
1278
1279out:
1280 return ret;
1281}
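/*
 * Illustrative sketch (kept under #if 0, not built with this file): the
 * usual life cycle of the parser helpers above as seen from a write()
 * handler. sketch_parse_write() and sketch_handle_token() are placeholder
 * names for the example; the get_init/get_user/put sequence mirrors how
 * callers in this file use the parser.
 */
#if 0
static ssize_t sketch_parse_write(const char __user *ubuf, size_t cnt,
				  loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t ret;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	ret = trace_get_user(&parser, ubuf, cnt, ppos);
	if (ret >= 0 && trace_parser_loaded(&parser)) {
		parser.buffer[parser.idx] = 0;
		sketch_handle_token(parser.buffer);
	}

	trace_parser_put(&parser);
	return ret;
}
#endif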
1282
3a161d99 1283/* TODO add a seq_buf_to_buffer() */
b8b94265 1284static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
3c56819b
EGM
1285{
1286 int len;
3c56819b 1287
5ac48378 1288 if (trace_seq_used(s) <= s->seq.readpos)
3c56819b
EGM
1289 return -EBUSY;
1290
5ac48378 1291 len = trace_seq_used(s) - s->seq.readpos;
3c56819b
EGM
1292 if (cnt > len)
1293 cnt = len;
3a161d99 1294 memcpy(buf, s->buffer + s->seq.readpos, cnt);
3c56819b 1295
3a161d99 1296 s->seq.readpos += cnt;
3c56819b
EGM
1297 return cnt;
1298}
1299
0e950173
TB
1300unsigned long __read_mostly tracing_thresh;
1301
5d4a9dba 1302#ifdef CONFIG_TRACER_MAX_TRACE
5d4a9dba
SR
1303/*
1304 * Copy the new maximum trace into the separate maximum-trace
1305 * structure. (this way the maximum trace is permanently saved,
1306 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1307 */
1308static void
1309__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1310{
12883efb
SRRH
1311 struct trace_buffer *trace_buf = &tr->trace_buffer;
1312 struct trace_buffer *max_buf = &tr->max_buffer;
1313 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1314 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
5d4a9dba 1315
12883efb
SRRH
1316 max_buf->cpu = cpu;
1317 max_buf->time_start = data->preempt_timestamp;
5d4a9dba 1318
6d9b3fa5 1319 max_data->saved_latency = tr->max_latency;
8248ac05
SR
1320 max_data->critical_start = data->critical_start;
1321 max_data->critical_end = data->critical_end;
5d4a9dba 1322
1acaa1b2 1323 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
8248ac05 1324 max_data->pid = tsk->pid;
f17a5194
SRRH
1325 /*
1326 * If tsk == current, then use current_uid(), as that does not use
1327 * RCU. The irq tracer can be called out of RCU scope.
1328 */
1329 if (tsk == current)
1330 max_data->uid = current_uid();
1331 else
1332 max_data->uid = task_uid(tsk);
1333
8248ac05
SR
1334 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1335 max_data->policy = tsk->policy;
1336 max_data->rt_priority = tsk->rt_priority;
5d4a9dba
SR
1337
1338 /* record this tasks comm */
1339 tracing_record_cmdline(tsk);
1340}
1341
4fcdae83
SR
1342/**
1343 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1344 * @tr: tracer
1345 * @tsk: the task with the latency
1346 * @cpu: The cpu that initiated the trace.
1347 *
1348 * Flip the buffers between the @tr and the max_tr and record information
1349 * about which task was the cause of this latency.
1350 */
e309b41d 1351void
bc0c38d1
SR
1352update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1353{
2721e72d 1354 struct ring_buffer *buf;
bc0c38d1 1355
2b6080f2 1356 if (tr->stop_count)
b8de7bd1
SR
1357 return;
1358
4c11d7ae 1359 WARN_ON_ONCE(!irqs_disabled());
34600f0e 1360
45ad21ca 1361 if (!tr->allocated_snapshot) {
debdd57f 1362 /* Only the nop tracer should hit this when disabling */
2b6080f2 1363 WARN_ON_ONCE(tr->current_trace != &nop_trace);
34600f0e 1364 return;
debdd57f 1365 }
34600f0e 1366
0b9b12c1 1367 arch_spin_lock(&tr->max_lock);
3928a8a2 1368
12883efb
SRRH
1369 buf = tr->trace_buffer.buffer;
1370 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1371 tr->max_buffer.buffer = buf;
3928a8a2 1372
bc0c38d1 1373 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1374 arch_spin_unlock(&tr->max_lock);
bc0c38d1
SR
1375}
1376
1377/**
1378 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
4fcdae83
SR
1382 *
1383 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
bc0c38d1 1384 */
e309b41d 1385void
bc0c38d1
SR
1386update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1387{
3928a8a2 1388 int ret;
bc0c38d1 1389
2b6080f2 1390 if (tr->stop_count)
b8de7bd1
SR
1391 return;
1392
4c11d7ae 1393 WARN_ON_ONCE(!irqs_disabled());
6c24499f 1394 if (!tr->allocated_snapshot) {
2930e04d 1395 /* Only the nop tracer should hit this when disabling */
9e8529af 1396 WARN_ON_ONCE(tr->current_trace != &nop_trace);
ef710e10 1397 return;
2930e04d 1398 }
ef710e10 1399
0b9b12c1 1400 arch_spin_lock(&tr->max_lock);
bc0c38d1 1401
12883efb 1402 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
3928a8a2 1403
e8165dbb
SR
1404 if (ret == -EBUSY) {
1405 /*
1406 * We failed to swap the buffer due to a commit taking
1407 * place on this CPU. We fail to record, but we reset
1408 * the max trace buffer (no one writes directly to it)
1409 * and flag that it failed.
1410 */
12883efb 1411 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
e8165dbb
SR
1412 "Failed to swap buffers due to commit in progress\n");
1413 }
1414
e8165dbb 1415 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
bc0c38d1
SR
1416
1417 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1418 arch_spin_unlock(&tr->max_lock);
bc0c38d1 1419}
5d4a9dba 1420#endif /* CONFIG_TRACER_MAX_TRACE */
bc0c38d1 1421
e30f53aa 1422static int wait_on_pipe(struct trace_iterator *iter, bool full)
0d5c6e1c 1423{
15693458
SRRH
1424 /* Iterators are static, they should be filled or empty */
1425 if (trace_buffer_iter(iter, iter->cpu_file))
8b8b3683 1426 return 0;
0d5c6e1c 1427
e30f53aa
RV
1428 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1429 full);
0d5c6e1c
SR
1430}
1431
f4e781c0 1432#ifdef CONFIG_FTRACE_STARTUP_TEST
9afecfbb
SRV
1433static bool selftests_can_run;
1434
1435struct trace_selftests {
1436 struct list_head list;
1437 struct tracer *type;
1438};
1439
1440static LIST_HEAD(postponed_selftests);
1441
1442static int save_selftest(struct tracer *type)
1443{
1444 struct trace_selftests *selftest;
1445
1446 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1447 if (!selftest)
1448 return -ENOMEM;
1449
1450 selftest->type = type;
1451 list_add(&selftest->list, &postponed_selftests);
1452 return 0;
1453}
1454
f4e781c0
SRRH
1455static int run_tracer_selftest(struct tracer *type)
1456{
1457 struct trace_array *tr = &global_trace;
1458 struct tracer *saved_tracer = tr->current_trace;
1459 int ret;
0d5c6e1c 1460
f4e781c0
SRRH
1461 if (!type->selftest || tracing_selftest_disabled)
1462 return 0;
0d5c6e1c 1463
9afecfbb
SRV
1464 /*
1465 * If a tracer registers early in boot up (before scheduling is
1466 * initialized and such), then do not run its selftests yet.
1467 * Instead, run it a little later in the boot process.
1468 */
1469 if (!selftests_can_run)
1470 return save_selftest(type);
1471
0d5c6e1c 1472 /*
f4e781c0
SRRH
1473 * Run a selftest on this tracer.
1474 * Here we reset the trace buffer, and set the current
1475 * tracer to be this tracer. The tracer can then run some
1476 * internal tracing to verify that everything is in order.
1477 * If we fail, we do not register this tracer.
0d5c6e1c 1478 */
f4e781c0 1479 tracing_reset_online_cpus(&tr->trace_buffer);
0d5c6e1c 1480
f4e781c0
SRRH
1481 tr->current_trace = type;
1482
1483#ifdef CONFIG_TRACER_MAX_TRACE
1484 if (type->use_max_tr) {
1485 /* If we expanded the buffers, make sure the max is expanded too */
1486 if (ring_buffer_expanded)
1487 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1488 RING_BUFFER_ALL_CPUS);
1489 tr->allocated_snapshot = true;
1490 }
1491#endif
1492
1493 /* the test is responsible for initializing and enabling */
1494 pr_info("Testing tracer %s: ", type->name);
1495 ret = type->selftest(type, tr);
1496 /* the test is responsible for resetting too */
1497 tr->current_trace = saved_tracer;
1498 if (ret) {
1499 printk(KERN_CONT "FAILED!\n");
1500 /* Add the warning after printing 'FAILED' */
1501 WARN_ON(1);
1502 return -1;
1503 }
1504 /* Only reset on passing, to avoid touching corrupted buffers */
1505 tracing_reset_online_cpus(&tr->trace_buffer);
1506
1507#ifdef CONFIG_TRACER_MAX_TRACE
1508 if (type->use_max_tr) {
1509 tr->allocated_snapshot = false;
0d5c6e1c 1510
f4e781c0
SRRH
1511 /* Shrink the max buffer again */
1512 if (ring_buffer_expanded)
1513 ring_buffer_resize(tr->max_buffer.buffer, 1,
1514 RING_BUFFER_ALL_CPUS);
1515 }
1516#endif
1517
1518 printk(KERN_CONT "PASSED\n");
1519 return 0;
1520}
9afecfbb
SRV
1521
1522static __init int init_trace_selftests(void)
1523{
1524 struct trace_selftests *p, *n;
1525 struct tracer *t, **last;
1526 int ret;
1527
1528 selftests_can_run = true;
1529
1530 mutex_lock(&trace_types_lock);
1531
1532 if (list_empty(&postponed_selftests))
1533 goto out;
1534
1535 pr_info("Running postponed tracer tests:\n");
1536
1537 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1538 ret = run_tracer_selftest(p->type);
1539 /* If the test fails, then warn and remove from available_tracers */
1540 if (ret < 0) {
1541 WARN(1, "tracer: %s failed selftest, disabling\n",
1542 p->type->name);
1543 last = &trace_types;
1544 for (t = trace_types; t; t = t->next) {
1545 if (t == p->type) {
1546 *last = t->next;
1547 break;
1548 }
1549 last = &t->next;
1550 }
1551 }
1552 list_del(&p->list);
1553 kfree(p);
1554 }
1555
1556 out:
1557 mutex_unlock(&trace_types_lock);
1558
1559 return 0;
1560}
b9ef0326 1561core_initcall(init_trace_selftests);
f4e781c0
SRRH
1562#else
1563static inline int run_tracer_selftest(struct tracer *type)
1564{
1565 return 0;
0d5c6e1c 1566}
f4e781c0 1567#endif /* CONFIG_FTRACE_STARTUP_TEST */
0d5c6e1c 1568
41d9c0be
SRRH
1569static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1570
a4d1e688
JW
1571static void __init apply_trace_boot_options(void);
1572
4fcdae83
SR
1573/**
1574 * register_tracer - register a tracer with the ftrace system.
1575 * @type - the plugin for the tracer
1576 *
1577 * Register a new plugin tracer.
1578 */
a4d1e688 1579int __init register_tracer(struct tracer *type)
bc0c38d1
SR
1580{
1581 struct tracer *t;
bc0c38d1
SR
1582 int ret = 0;
1583
1584 if (!type->name) {
1585 pr_info("Tracer must have a name\n");
1586 return -1;
1587 }
1588
24a461d5 1589 if (strlen(type->name) >= MAX_TRACER_SIZE) {
ee6c2c1b
LZ
1590 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1591 return -1;
1592 }
1593
bc0c38d1 1594 mutex_lock(&trace_types_lock);
86fa2f60 1595
8e1b82e0
FW
1596 tracing_selftest_running = true;
1597
bc0c38d1
SR
1598 for (t = trace_types; t; t = t->next) {
1599 if (strcmp(type->name, t->name) == 0) {
1600 /* already found */
ee6c2c1b 1601 pr_info("Tracer %s already registered\n",
bc0c38d1
SR
1602 type->name);
1603 ret = -1;
1604 goto out;
1605 }
1606 }
1607
adf9f195
FW
1608 if (!type->set_flag)
1609 type->set_flag = &dummy_set_flag;
d39cdd20
CH
1610 if (!type->flags) {
1611 /*allocate a dummy tracer_flags*/
1612 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
c8ca003b
CH
1613 if (!type->flags) {
1614 ret = -ENOMEM;
1615 goto out;
1616 }
d39cdd20
CH
1617 type->flags->val = 0;
1618 type->flags->opts = dummy_tracer_opt;
1619 } else
adf9f195
FW
1620 if (!type->flags->opts)
1621 type->flags->opts = dummy_tracer_opt;
6eaaa5d5 1622
d39cdd20
CH
1623 /* store the tracer for __set_tracer_option */
1624 type->flags->trace = type;
1625
f4e781c0
SRRH
1626 ret = run_tracer_selftest(type);
1627 if (ret < 0)
1628 goto out;
60a11774 1629
bc0c38d1
SR
1630 type->next = trace_types;
1631 trace_types = type;
41d9c0be 1632 add_tracer_options(&global_trace, type);
60a11774 1633
bc0c38d1 1634 out:
8e1b82e0 1635 tracing_selftest_running = false;
bc0c38d1
SR
1636 mutex_unlock(&trace_types_lock);
1637
dac74940
SR
1638 if (ret || !default_bootup_tracer)
1639 goto out_unlock;
1640
ee6c2c1b 1641 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
dac74940
SR
1642 goto out_unlock;
1643
1644 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1645 /* Do we want this tracer to start on bootup? */
607e2ea1 1646 tracing_set_tracer(&global_trace, type->name);
dac74940 1647 default_bootup_tracer = NULL;
a4d1e688
JW
1648
1649 apply_trace_boot_options();
1650
dac74940 1651 /* disable other selftests, since this will break it. */
55034cd6 1652 tracing_selftest_disabled = true;
b2821ae6 1653#ifdef CONFIG_FTRACE_STARTUP_TEST
dac74940
SR
1654 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1655 type->name);
b2821ae6 1656#endif
b2821ae6 1657
dac74940 1658 out_unlock:
bc0c38d1
SR
1659 return ret;
1660}
1661
12883efb 1662void tracing_reset(struct trace_buffer *buf, int cpu)
f633903a 1663{
12883efb 1664 struct ring_buffer *buffer = buf->buffer;
f633903a 1665
a5416411
HT
1666 if (!buffer)
1667 return;
1668
f633903a
SR
1669 ring_buffer_record_disable(buffer);
1670
1671 /* Make sure all commits have finished */
1672 synchronize_sched();
68179686 1673 ring_buffer_reset_cpu(buffer, cpu);
f633903a
SR
1674
1675 ring_buffer_record_enable(buffer);
1676}
1677
12883efb 1678void tracing_reset_online_cpus(struct trace_buffer *buf)
213cc060 1679{
12883efb 1680 struct ring_buffer *buffer = buf->buffer;
213cc060
PE
1681 int cpu;
1682
a5416411
HT
1683 if (!buffer)
1684 return;
1685
621968cd
SR
1686 ring_buffer_record_disable(buffer);
1687
1688 /* Make sure all commits have finished */
1689 synchronize_sched();
1690
9457158b 1691 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
213cc060
PE
1692
1693 for_each_online_cpu(cpu)
68179686 1694 ring_buffer_reset_cpu(buffer, cpu);
621968cd
SR
1695
1696 ring_buffer_record_enable(buffer);
213cc060
PE
1697}
1698
09d8091c 1699/* Must have trace_types_lock held */
873c642f 1700void tracing_reset_all_online_cpus(void)
9456f0fa 1701{
873c642f
SRRH
1702 struct trace_array *tr;
1703
873c642f 1704 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
12883efb
SRRH
1705 tracing_reset_online_cpus(&tr->trace_buffer);
1706#ifdef CONFIG_TRACER_MAX_TRACE
1707 tracing_reset_online_cpus(&tr->max_buffer);
1708#endif
873c642f 1709 }
9456f0fa
SR
1710}
1711
d914ba37
JF
1712static int *tgid_map;
1713
939c7a4f 1714#define SAVED_CMDLINES_DEFAULT 128
2c7eea4c 1715#define NO_CMDLINE_MAP UINT_MAX
edc35bd7 1716static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
939c7a4f
YY
1717struct saved_cmdlines_buffer {
1718 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1719 unsigned *map_cmdline_to_pid;
1720 unsigned cmdline_num;
1721 int cmdline_idx;
1722 char *saved_cmdlines;
1723};
1724static struct saved_cmdlines_buffer *savedcmd;
25b0b44a 1725
25b0b44a 1726/* temporary disable recording */
d914ba37 1727static atomic_t trace_record_taskinfo_disabled __read_mostly;
bc0c38d1 1728
939c7a4f
YY
1729static inline char *get_saved_cmdlines(int idx)
1730{
1731 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1732}
1733
1734static inline void set_cmdline(int idx, const char *cmdline)
bc0c38d1 1735{
939c7a4f
YY
1736 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1737}
1738
1739static int allocate_cmdlines_buffer(unsigned int val,
1740 struct saved_cmdlines_buffer *s)
1741{
1742 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1743 GFP_KERNEL);
1744 if (!s->map_cmdline_to_pid)
1745 return -ENOMEM;
1746
1747 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1748 if (!s->saved_cmdlines) {
1749 kfree(s->map_cmdline_to_pid);
1750 return -ENOMEM;
1751 }
1752
1753 s->cmdline_idx = 0;
1754 s->cmdline_num = val;
1755 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1756 sizeof(s->map_pid_to_cmdline));
1757 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1758 val * sizeof(*s->map_cmdline_to_pid));
1759
1760 return 0;
1761}
1762
1763static int trace_create_savedcmd(void)
1764{
1765 int ret;
1766
a6af8fbf 1767 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
939c7a4f
YY
1768 if (!savedcmd)
1769 return -ENOMEM;
1770
1771 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1772 if (ret < 0) {
1773 kfree(savedcmd);
1774 savedcmd = NULL;
1775 return -ENOMEM;
1776 }
1777
1778 return 0;
bc0c38d1
SR
1779}
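/*
 * Illustrative sketch (kept under #if 0, not built with this file): how the
 * arrays set up above map a pid to its saved comm. sketch_find_cmdline() is
 * a name for the example only; the real lookup in this file also handles
 * the idle task and revalidates the entry via map_cmdline_to_pid before
 * trusting it.
 */
#if 0
static const char *sketch_find_cmdline(int pid)
{
	unsigned map;

	if (pid <= 0 || pid > PID_MAX_DEFAULT)
		return "<...>";

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map == NO_CMDLINE_MAP)
		return "<...>";

	return get_saved_cmdlines(map);
}
#endif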
1780
b5130b1e
CE
1781int is_tracing_stopped(void)
1782{
2b6080f2 1783 return global_trace.stop_count;
b5130b1e
CE
1784}
1785
0f048701
SR
1786/**
1787 * tracing_start - quick start of the tracer
1788 *
1789 * If tracing is enabled but was stopped by tracing_stop,
1790 * this will start the tracer back up.
1791 */
1792void tracing_start(void)
1793{
1794 struct ring_buffer *buffer;
1795 unsigned long flags;
1796
1797 if (tracing_disabled)
1798 return;
1799
2b6080f2
SR
1800 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1801 if (--global_trace.stop_count) {
1802 if (global_trace.stop_count < 0) {
b06a8301
SR
1803 /* Someone screwed up their debugging */
1804 WARN_ON_ONCE(1);
2b6080f2 1805 global_trace.stop_count = 0;
b06a8301 1806 }
0f048701
SR
1807 goto out;
1808 }
1809
a2f80714 1810 /* Prevent the buffers from switching */
0b9b12c1 1811 arch_spin_lock(&global_trace.max_lock);
0f048701 1812
12883efb 1813 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1814 if (buffer)
1815 ring_buffer_record_enable(buffer);
1816
12883efb
SRRH
1817#ifdef CONFIG_TRACER_MAX_TRACE
1818 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1819 if (buffer)
1820 ring_buffer_record_enable(buffer);
12883efb 1821#endif
0f048701 1822
0b9b12c1 1823 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1824
0f048701 1825 out:
2b6080f2
SR
1826 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1827}
1828
1829static void tracing_start_tr(struct trace_array *tr)
1830{
1831 struct ring_buffer *buffer;
1832 unsigned long flags;
1833
1834 if (tracing_disabled)
1835 return;
1836
1837 /* If global, we need to also start the max tracer */
1838 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1839 return tracing_start();
1840
1841 raw_spin_lock_irqsave(&tr->start_lock, flags);
1842
1843 if (--tr->stop_count) {
1844 if (tr->stop_count < 0) {
1845 /* Someone screwed up their debugging */
1846 WARN_ON_ONCE(1);
1847 tr->stop_count = 0;
1848 }
1849 goto out;
1850 }
1851
12883efb 1852 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1853 if (buffer)
1854 ring_buffer_record_enable(buffer);
1855
1856 out:
1857 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1858}
1859
1860/**
1861 * tracing_stop - quick stop of the tracer
1862 *
 1863 * Lightweight way to stop tracing. Use in conjunction with
1864 * tracing_start.
1865 */
1866void tracing_stop(void)
1867{
1868 struct ring_buffer *buffer;
1869 unsigned long flags;
1870
2b6080f2
SR
1871 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1872 if (global_trace.stop_count++)
0f048701
SR
1873 goto out;
1874
a2f80714 1875 /* Prevent the buffers from switching */
0b9b12c1 1876 arch_spin_lock(&global_trace.max_lock);
a2f80714 1877
12883efb 1878 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1879 if (buffer)
1880 ring_buffer_record_disable(buffer);
1881
12883efb
SRRH
1882#ifdef CONFIG_TRACER_MAX_TRACE
1883 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1884 if (buffer)
1885 ring_buffer_record_disable(buffer);
12883efb 1886#endif
0f048701 1887
0b9b12c1 1888 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1889
0f048701 1890 out:
2b6080f2
SR
1891 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1892}
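
/*
 * Illustrative sketch, not part of the original file: tracing_stop() and
 * tracing_start() nest via stop_count, so every stop must be paired with
 * a start. The function name is hypothetical.
 */
#ifdef UNUSED
static void example_freeze_window(void)
{
	tracing_stop();
	/* the ring buffers are quiescent here; inspect or dump them */
	tracing_start();
}
#endif /* UNUSED */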
1893
1894static void tracing_stop_tr(struct trace_array *tr)
1895{
1896 struct ring_buffer *buffer;
1897 unsigned long flags;
1898
1899 /* If global, we need to also stop the max tracer */
1900 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1901 return tracing_stop();
1902
1903 raw_spin_lock_irqsave(&tr->start_lock, flags);
1904 if (tr->stop_count++)
1905 goto out;
1906
12883efb 1907 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1908 if (buffer)
1909 ring_buffer_record_disable(buffer);
1910
1911 out:
1912 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1913}
1914
379cfdac 1915static int trace_save_cmdline(struct task_struct *tsk)
bc0c38d1 1916{
a635cf04 1917 unsigned pid, idx;
bc0c38d1 1918
eaf260ac
JF
1919 /* treat recording of idle task as a success */
1920 if (!tsk->pid)
1921 return 1;
1922
1923 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
379cfdac 1924 return 0;
bc0c38d1
SR
1925
1926 /*
1927 * It's not the end of the world if we don't get
1928 * the lock, but we also don't want to spin
1929 * nor do we want to disable interrupts,
1930 * so if we miss here, then better luck next time.
1931 */
0199c4e6 1932 if (!arch_spin_trylock(&trace_cmdline_lock))
379cfdac 1933 return 0;
bc0c38d1 1934
939c7a4f 1935 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2c7eea4c 1936 if (idx == NO_CMDLINE_MAP) {
939c7a4f 1937 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
bc0c38d1 1938
a635cf04
CE
1939 /*
1940 * Check whether the cmdline buffer at idx has a pid
1941 * mapped. We are going to overwrite that entry so we
1942 * need to clear the map_pid_to_cmdline. Otherwise we
1943 * would read the new comm for the old pid.
1944 */
939c7a4f 1945 pid = savedcmd->map_cmdline_to_pid[idx];
a635cf04 1946 if (pid != NO_CMDLINE_MAP)
939c7a4f 1947 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
bc0c38d1 1948
939c7a4f
YY
1949 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1950 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
bc0c38d1 1951
939c7a4f 1952 savedcmd->cmdline_idx = idx;
bc0c38d1
SR
1953 }
1954
939c7a4f 1955 set_cmdline(idx, tsk->comm);
bc0c38d1 1956
0199c4e6 1957 arch_spin_unlock(&trace_cmdline_lock);
379cfdac
SRRH
1958
1959 return 1;
bc0c38d1
SR
1960}
1961
4c27e756 1962static void __trace_find_cmdline(int pid, char comm[])
bc0c38d1 1963{
bc0c38d1
SR
1964 unsigned map;
1965
4ca53085
SR
1966 if (!pid) {
1967 strcpy(comm, "<idle>");
1968 return;
1969 }
bc0c38d1 1970
74bf4076
SR
1971 if (WARN_ON_ONCE(pid < 0)) {
1972 strcpy(comm, "<XXX>");
1973 return;
1974 }
1975
4ca53085
SR
1976 if (pid > PID_MAX_DEFAULT) {
1977 strcpy(comm, "<...>");
1978 return;
1979 }
bc0c38d1 1980
939c7a4f 1981 map = savedcmd->map_pid_to_cmdline[pid];
50d88758 1982 if (map != NO_CMDLINE_MAP)
e09e2867 1983 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
50d88758
TG
1984 else
1985 strcpy(comm, "<...>");
4c27e756
SRRH
1986}
1987
1988void trace_find_cmdline(int pid, char comm[])
1989{
1990 preempt_disable();
1991 arch_spin_lock(&trace_cmdline_lock);
1992
1993 __trace_find_cmdline(pid, comm);
bc0c38d1 1994
0199c4e6 1995 arch_spin_unlock(&trace_cmdline_lock);
5b6045a9 1996 preempt_enable();
bc0c38d1
SR
1997}
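
/*
 * Illustrative sketch, not part of the original file: output code resolves
 * a recorded pid back to a comm with trace_find_cmdline(); the destination
 * buffer must hold at least TASK_COMM_LEN bytes. The function name is
 * hypothetical.
 */
#ifdef UNUSED
static void example_print_comm(struct trace_seq *s, int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	trace_seq_printf(s, "%s-%d", comm, pid);
}
#endif /* UNUSED */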
1998
d914ba37
JF
1999int trace_find_tgid(int pid)
2000{
2001 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2002 return 0;
2003
2004 return tgid_map[pid];
2005}
2006
2007static int trace_save_tgid(struct task_struct *tsk)
2008{
bd45d34d
JF
2009 /* treat recording of idle task as a success */
2010 if (!tsk->pid)
2011 return 1;
2012
2013 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
d914ba37
JF
2014 return 0;
2015
2016 tgid_map[tsk->pid] = tsk->tgid;
2017 return 1;
2018}
2019
2020static bool tracing_record_taskinfo_skip(int flags)
2021{
2022 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2023 return true;
2024 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2025 return true;
2026 if (!__this_cpu_read(trace_taskinfo_save))
2027 return true;
2028 return false;
2029}
2030
2031/**
2032 * tracing_record_taskinfo - record the task info of a task
2033 *
 2034 * @task: task to record
 2035 * @flags: TRACE_RECORD_CMDLINE for recording comm
 2036 *         TRACE_RECORD_TGID for recording tgid
2037 */
2038void tracing_record_taskinfo(struct task_struct *task, int flags)
2039{
29b1a8ad
JF
2040 bool done;
2041
d914ba37
JF
2042 if (tracing_record_taskinfo_skip(flags))
2043 return;
29b1a8ad
JF
2044
2045 /*
2046 * Record as much task information as possible. If some fail, continue
2047 * to try to record the others.
2048 */
2049 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2050 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2051
2052 /* If recording any information failed, retry again soon. */
2053 if (!done)
d914ba37
JF
2054 return;
2055
2056 __this_cpu_write(trace_taskinfo_save, false);
2057}
2058
2059/**
2060 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2061 *
 2062 * @prev: previous task during sched_switch
 2063 * @next: next task during sched_switch
 2064 * @flags: TRACE_RECORD_CMDLINE for recording comm
 2065 *         TRACE_RECORD_TGID for recording tgid
2066 */
2067void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2068 struct task_struct *next, int flags)
bc0c38d1 2069{
29b1a8ad
JF
2070 bool done;
2071
d914ba37
JF
2072 if (tracing_record_taskinfo_skip(flags))
2073 return;
2074
29b1a8ad
JF
2075 /*
2076 * Record as much task information as possible. If some fail, continue
2077 * to try to record the others.
2078 */
2079 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2080 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2081 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2082 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
bc0c38d1 2083
29b1a8ad
JF
2084 /* If recording any information failed, retry again soon. */
2085 if (!done)
7ffbd48d
SR
2086 return;
2087
d914ba37
JF
2088 __this_cpu_write(trace_taskinfo_save, false);
2089}
2090
2091/* Helpers to record a specific task information */
2092void tracing_record_cmdline(struct task_struct *task)
2093{
2094 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2095}
2096
2097void tracing_record_tgid(struct task_struct *task)
2098{
2099 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
bc0c38d1
SR
2100}
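
/*
 * Illustrative sketch, not part of the original file: a caller that wants
 * both the comm and the tgid cached in one go passes both flags to
 * tracing_record_taskinfo(). The function name is hypothetical.
 */
#ifdef UNUSED
static void example_record_both(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID);
}
#endif /* UNUSED */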
2101
af0009fc
SRV
2102/*
2103 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2104 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2105 * simplifies those functions and keeps them in sync.
2106 */
2107enum print_line_t trace_handle_return(struct trace_seq *s)
2108{
2109 return trace_seq_has_overflowed(s) ?
2110 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2111}
2112EXPORT_SYMBOL_GPL(trace_handle_return);
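
/*
 * Illustrative sketch, not part of the original file: a typical trace_event
 * output callback writes to iter->seq and lets trace_handle_return() fold
 * the overflow check into the return value. The callback name is
 * hypothetical.
 */
#ifdef UNUSED
static enum print_line_t example_event_trace(struct trace_iterator *iter,
					     int flags,
					     struct trace_event *event)
{
	trace_seq_printf(&iter->seq, "example: cpu=%d ts=%llu\n",
			 iter->cpu, (unsigned long long)iter->ts);

	return trace_handle_return(&iter->seq);
}
#endif /* UNUSED */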
2113
45dcd8b8 2114void
38697053
SR
2115tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2116 int pc)
bc0c38d1
SR
2117{
2118 struct task_struct *tsk = current;
bc0c38d1 2119
777e208d
SR
2120 entry->preempt_count = pc & 0xff;
2121 entry->pid = (tsk) ? tsk->pid : 0;
2122 entry->flags =
9244489a 2123#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2e2ca155 2124 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
9244489a
SR
2125#else
2126 TRACE_FLAG_IRQS_NOSUPPORT |
2127#endif
7e6867bf 2128 ((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
bc0c38d1 2129 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
c59f29cb 2130 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
e5137b50
PZ
2131 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2132 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
bc0c38d1 2133}
f413cdb8 2134EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
bc0c38d1 2135
e77405ad
SR
2136struct ring_buffer_event *
2137trace_buffer_lock_reserve(struct ring_buffer *buffer,
2138 int type,
2139 unsigned long len,
2140 unsigned long flags, int pc)
51a763dd 2141{
3e9a8aad 2142 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
0fc1b09f
SRRH
2143}
2144
2145DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2146DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2147static int trace_buffered_event_ref;
2148
2149/**
2150 * trace_buffered_event_enable - enable buffering events
2151 *
2152 * When events are being filtered, it is quicker to use a temporary
2153 * buffer to write the event data into if there's a likely chance
 2154 * that it will not be committed. Discarding an event from the ring
 2155 * buffer is not as fast as committing one, and is much slower than
 2156 * copying into a temporary buffer and committing that.
2157 *
2158 * When an event is to be filtered, allocate per cpu buffers to
2159 * write the event data into, and if the event is filtered and discarded
2160 * it is simply dropped, otherwise, the entire data is to be committed
2161 * in one shot.
2162 */
2163void trace_buffered_event_enable(void)
2164{
2165 struct ring_buffer_event *event;
2166 struct page *page;
2167 int cpu;
51a763dd 2168
0fc1b09f
SRRH
2169 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2170
2171 if (trace_buffered_event_ref++)
2172 return;
2173
2174 for_each_tracing_cpu(cpu) {
2175 page = alloc_pages_node(cpu_to_node(cpu),
2176 GFP_KERNEL | __GFP_NORETRY, 0);
2177 if (!page)
2178 goto failed;
2179
2180 event = page_address(page);
2181 memset(event, 0, sizeof(*event));
2182
2183 per_cpu(trace_buffered_event, cpu) = event;
2184
2185 preempt_disable();
2186 if (cpu == smp_processor_id() &&
2187 this_cpu_read(trace_buffered_event) !=
2188 per_cpu(trace_buffered_event, cpu))
2189 WARN_ON_ONCE(1);
2190 preempt_enable();
51a763dd
ACM
2191 }
2192
0fc1b09f
SRRH
2193 return;
2194 failed:
2195 trace_buffered_event_disable();
2196}
2197
2198static void enable_trace_buffered_event(void *data)
2199{
2200 /* Probably not needed, but do it anyway */
2201 smp_rmb();
2202 this_cpu_dec(trace_buffered_event_cnt);
2203}
2204
2205static void disable_trace_buffered_event(void *data)
2206{
2207 this_cpu_inc(trace_buffered_event_cnt);
2208}
2209
2210/**
2211 * trace_buffered_event_disable - disable buffering events
2212 *
2213 * When a filter is removed, it is faster to not use the buffered
2214 * events, and to commit directly into the ring buffer. Free up
2215 * the temp buffers when there are no more users. This requires
2216 * special synchronization with current events.
2217 */
2218void trace_buffered_event_disable(void)
2219{
2220 int cpu;
2221
2222 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2223
2224 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2225 return;
2226
2227 if (--trace_buffered_event_ref)
2228 return;
2229
2230 preempt_disable();
2231 /* For each CPU, set the buffer as used. */
2232 smp_call_function_many(tracing_buffer_mask,
2233 disable_trace_buffered_event, NULL, 1);
2234 preempt_enable();
2235
2236 /* Wait for all current users to finish */
2237 synchronize_sched();
2238
2239 for_each_tracing_cpu(cpu) {
2240 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2241 per_cpu(trace_buffered_event, cpu) = NULL;
2242 }
2243 /*
2244 * Make sure trace_buffered_event is NULL before clearing
2245 * trace_buffered_event_cnt.
2246 */
2247 smp_wmb();
2248
2249 preempt_disable();
2250 /* Do the work on each cpu */
2251 smp_call_function_many(tracing_buffer_mask,
2252 enable_trace_buffered_event, NULL, 1);
2253 preempt_enable();
51a763dd 2254}
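
/*
 * Illustrative sketch, not part of the original file: callers pair the
 * enable/disable above under event_mutex, matching the WARN_ON_ONCE()
 * checks in both functions. The function name is hypothetical.
 */
#ifdef UNUSED
static void example_filter_toggle(bool adding_filter)
{
	mutex_lock(&event_mutex);
	if (adding_filter)
		trace_buffered_event_enable();
	else
		trace_buffered_event_disable();
	mutex_unlock(&event_mutex);
}
#endif /* UNUSED */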
51a763dd 2255
2c4a33ab
SRRH
2256static struct ring_buffer *temp_buffer;
2257
ccb469a1
SR
2258struct ring_buffer_event *
2259trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
7f1d2f82 2260 struct trace_event_file *trace_file,
ccb469a1
SR
2261 int type, unsigned long len,
2262 unsigned long flags, int pc)
2263{
2c4a33ab 2264 struct ring_buffer_event *entry;
0fc1b09f 2265 int val;
2c4a33ab 2266
7f1d2f82 2267 *current_rb = trace_file->tr->trace_buffer.buffer;
0fc1b09f
SRRH
2268
2269 if ((trace_file->flags &
2270 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2271 (entry = this_cpu_read(trace_buffered_event))) {
2272 /* Try to use the per cpu buffer first */
2273 val = this_cpu_inc_return(trace_buffered_event_cnt);
2274 if (val == 1) {
2275 trace_event_setup(entry, type, flags, pc);
2276 entry->array[0] = len;
2277 return entry;
2278 }
2279 this_cpu_dec(trace_buffered_event_cnt);
2280 }
2281
3e9a8aad
SRRH
2282 entry = __trace_buffer_lock_reserve(*current_rb,
2283 type, len, flags, pc);
2c4a33ab
SRRH
2284 /*
2285 * If tracing is off, but we have triggers enabled
2286 * we still need to look at the event data. Use the temp_buffer
 2287 * to store the trace event for the trigger to use. It's recursion
2288 * safe and will not be recorded anywhere.
2289 */
5d6ad960 2290 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2c4a33ab 2291 *current_rb = temp_buffer;
3e9a8aad
SRRH
2292 entry = __trace_buffer_lock_reserve(*current_rb,
2293 type, len, flags, pc);
2c4a33ab
SRRH
2294 }
2295 return entry;
ccb469a1
SR
2296}
2297EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
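
/*
 * Illustrative sketch, not part of the original file: the reserve/fill/commit
 * pattern that event code builds on top of the helper above. The function
 * name is hypothetical, and TRACE_FN with an ftrace_entry payload is used
 * only to keep the example self-contained.
 */
#ifdef UNUSED
static void example_emit_event(struct trace_event_file *trace_file,
			       unsigned long flags, int pc)
{
	struct ring_buffer *buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file, TRACE_FN,
						sizeof(*entry), flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;
	entry->parent_ip = 0;

	/* handles triggers, soft disable and the buffered-event fast path */
	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    flags, pc);
}
#endif /* UNUSED */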
2298
42391745
SRRH
2299static DEFINE_SPINLOCK(tracepoint_iter_lock);
2300static DEFINE_MUTEX(tracepoint_printk_mutex);
2301
2302static void output_printk(struct trace_event_buffer *fbuffer)
2303{
2304 struct trace_event_call *event_call;
2305 struct trace_event *event;
2306 unsigned long flags;
2307 struct trace_iterator *iter = tracepoint_print_iter;
2308
2309 /* We should never get here if iter is NULL */
2310 if (WARN_ON_ONCE(!iter))
2311 return;
2312
2313 event_call = fbuffer->trace_file->event_call;
2314 if (!event_call || !event_call->event.funcs ||
2315 !event_call->event.funcs->trace)
2316 return;
2317
2318 event = &fbuffer->trace_file->event_call->event;
2319
2320 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2321 trace_seq_init(&iter->seq);
2322 iter->ent = fbuffer->entry;
2323 event_call->event.funcs->trace(iter, 0, event);
2324 trace_seq_putc(&iter->seq, 0);
2325 printk("%s", iter->seq.buffer);
2326
2327 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2328}
2329
2330int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2331 void __user *buffer, size_t *lenp,
2332 loff_t *ppos)
2333{
2334 int save_tracepoint_printk;
2335 int ret;
2336
2337 mutex_lock(&tracepoint_printk_mutex);
2338 save_tracepoint_printk = tracepoint_printk;
2339
2340 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2341
2342 /*
2343 * This will force exiting early, as tracepoint_printk
 2344 * is always zero when tracepoint_print_iter is not allocated.
2345 */
2346 if (!tracepoint_print_iter)
2347 tracepoint_printk = 0;
2348
2349 if (save_tracepoint_printk == tracepoint_printk)
2350 goto out;
2351
2352 if (tracepoint_printk)
2353 static_key_enable(&tracepoint_printk_key.key);
2354 else
2355 static_key_disable(&tracepoint_printk_key.key);
2356
2357 out:
2358 mutex_unlock(&tracepoint_printk_mutex);
2359
2360 return ret;
2361}
2362
2363void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2364{
2365 if (static_key_false(&tracepoint_printk_key.key))
2366 output_printk(fbuffer);
2367
2368 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2369 fbuffer->event, fbuffer->entry,
2370 fbuffer->flags, fbuffer->pc);
2371}
2372EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2373
b7f0c959
SRRH
2374void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2375 struct ring_buffer *buffer,
0d5c6e1c
SR
2376 struct ring_buffer_event *event,
2377 unsigned long flags, int pc,
2378 struct pt_regs *regs)
1fd8df2c 2379{
7ffbd48d 2380 __buffer_unlock_commit(buffer, event);
1fd8df2c 2381
be54f69c
SRRH
2382 /*
2383 * If regs is not set, then skip the following callers:
2384 * trace_buffer_unlock_commit_regs
2385 * event_trigger_unlock_commit
2386 * trace_event_buffer_commit
2387 * trace_event_raw_event_sched_switch
2388 * Note, we can still get here via blktrace, wakeup tracer
2389 * and mmiotrace, but that's ok if they lose a function or
 2390 * two. They are not that meaningful.
2391 */
2392 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
1fd8df2c
MH
2393 ftrace_trace_userstack(buffer, flags, pc);
2394}
1fd8df2c 2395
52ffabe3
SRRH
2396/*
2397 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2398 */
2399void
2400trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2401 struct ring_buffer_event *event)
2402{
2403 __buffer_unlock_commit(buffer, event);
2404}
2405
478409dd
CZ
2406static void
2407trace_process_export(struct trace_export *export,
2408 struct ring_buffer_event *event)
2409{
2410 struct trace_entry *entry;
2411 unsigned int size = 0;
2412
2413 entry = ring_buffer_event_data(event);
2414 size = ring_buffer_event_length(event);
2415 export->write(entry, size);
2416}
2417
2418static DEFINE_MUTEX(ftrace_export_lock);
2419
2420static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2421
2422static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2423
2424static inline void ftrace_exports_enable(void)
2425{
2426 static_branch_enable(&ftrace_exports_enabled);
2427}
2428
2429static inline void ftrace_exports_disable(void)
2430{
2431 static_branch_disable(&ftrace_exports_enabled);
2432}
2433
2434void ftrace_exports(struct ring_buffer_event *event)
2435{
2436 struct trace_export *export;
2437
2438 preempt_disable_notrace();
2439
2440 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2441 while (export) {
2442 trace_process_export(export, event);
2443 export = rcu_dereference_raw_notrace(export->next);
2444 }
2445
2446 preempt_enable_notrace();
2447}
2448
2449static inline void
2450add_trace_export(struct trace_export **list, struct trace_export *export)
2451{
2452 rcu_assign_pointer(export->next, *list);
2453 /*
 2454 * We are adding export to the list, but another
 2455 * CPU might be walking that list. We need to make sure
 2456 * the export->next pointer is valid before another CPU sees
 2457 * the export pointer itself appear on the list.
2458 */
2459 rcu_assign_pointer(*list, export);
2460}
2461
2462static inline int
2463rm_trace_export(struct trace_export **list, struct trace_export *export)
2464{
2465 struct trace_export **p;
2466
2467 for (p = list; *p != NULL; p = &(*p)->next)
2468 if (*p == export)
2469 break;
2470
2471 if (*p != export)
2472 return -1;
2473
2474 rcu_assign_pointer(*p, (*p)->next);
2475
2476 return 0;
2477}
2478
2479static inline void
2480add_ftrace_export(struct trace_export **list, struct trace_export *export)
2481{
2482 if (*list == NULL)
2483 ftrace_exports_enable();
2484
2485 add_trace_export(list, export);
2486}
2487
2488static inline int
2489rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2490{
2491 int ret;
2492
2493 ret = rm_trace_export(list, export);
2494 if (*list == NULL)
2495 ftrace_exports_disable();
2496
2497 return ret;
2498}
2499
2500int register_ftrace_export(struct trace_export *export)
2501{
2502 if (WARN_ON_ONCE(!export->write))
2503 return -1;
2504
2505 mutex_lock(&ftrace_export_lock);
2506
2507 add_ftrace_export(&ftrace_exports_list, export);
2508
2509 mutex_unlock(&ftrace_export_lock);
2510
2511 return 0;
2512}
2513EXPORT_SYMBOL_GPL(register_ftrace_export);
2514
2515int unregister_ftrace_export(struct trace_export *export)
2516{
2517 int ret;
2518
2519 mutex_lock(&ftrace_export_lock);
2520
2521 ret = rm_ftrace_export(&ftrace_exports_list, export);
2522
2523 mutex_unlock(&ftrace_export_lock);
2524
2525 return ret;
2526}
2527EXPORT_SYMBOL_GPL(unregister_ftrace_export);
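
/*
 * Illustrative sketch, not part of the original file: a minimal consumer of
 * the export interface, assuming the ->write(buf, len) callback shape that
 * trace_process_export() above invokes. All names are hypothetical.
 */
#ifdef UNUSED
static void example_export_write(const void *buf, unsigned int len)
{
	/* forward the raw trace entry to some out-of-band channel */
}

static struct trace_export example_export = {
	.write	= example_export_write,
};

static int __init example_export_init(void)
{
	return register_ftrace_export(&example_export);
}
#endif /* UNUSED */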
2528
e309b41d 2529void
7be42151 2530trace_function(struct trace_array *tr,
38697053
SR
2531 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2532 int pc)
bc0c38d1 2533{
2425bcb9 2534 struct trace_event_call *call = &event_function;
12883efb 2535 struct ring_buffer *buffer = tr->trace_buffer.buffer;
3928a8a2 2536 struct ring_buffer_event *event;
777e208d 2537 struct ftrace_entry *entry;
bc0c38d1 2538
3e9a8aad
SRRH
2539 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2540 flags, pc);
3928a8a2
SR
2541 if (!event)
2542 return;
2543 entry = ring_buffer_event_data(event);
777e208d
SR
2544 entry->ip = ip;
2545 entry->parent_ip = parent_ip;
e1112b4d 2546
478409dd
CZ
2547 if (!call_filter_check_discard(call, entry, buffer, event)) {
2548 if (static_branch_unlikely(&ftrace_exports_enabled))
2549 ftrace_exports(event);
7ffbd48d 2550 __buffer_unlock_commit(buffer, event);
478409dd 2551 }
bc0c38d1
SR
2552}
2553
c0a0d0d3 2554#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
2555
2556#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2557struct ftrace_stack {
2558 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2559};
2560
2561static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2562static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2563
e77405ad 2564static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 2565 unsigned long flags,
1fd8df2c 2566 int skip, int pc, struct pt_regs *regs)
86387f7e 2567{
2425bcb9 2568 struct trace_event_call *call = &event_kernel_stack;
3928a8a2 2569 struct ring_buffer_event *event;
777e208d 2570 struct stack_entry *entry;
86387f7e 2571 struct stack_trace trace;
4a9bd3f1
SR
2572 int use_stack;
2573 int size = FTRACE_STACK_ENTRIES;
2574
2575 trace.nr_entries = 0;
2576 trace.skip = skip;
2577
be54f69c
SRRH
2578 /*
 2579 * Add two, for this function and the call to save_stack_trace().
2580 * If regs is set, then these functions will not be in the way.
2581 */
2582 if (!regs)
2583 trace.skip += 2;
2584
4a9bd3f1
SR
2585 /*
2586 * Since events can happen in NMIs there's no safe way to
2587 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2588 * or NMI comes in, it will just have to use the default
 2589 * FTRACE_STACK_ENTRIES.
2590 */
2591 preempt_disable_notrace();
2592
82146529 2593 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
2594 /*
2595 * We don't need any atomic variables, just a barrier.
2596 * If an interrupt comes in, we don't care, because it would
2597 * have exited and put the counter back to what we want.
2598 * We just need a barrier to keep gcc from moving things
2599 * around.
2600 */
2601 barrier();
2602 if (use_stack == 1) {
bdffd893 2603 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
2604 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2605
2606 if (regs)
2607 save_stack_trace_regs(regs, &trace);
2608 else
2609 save_stack_trace(&trace);
2610
2611 if (trace.nr_entries > size)
2612 size = trace.nr_entries;
2613 } else
2614 /* From now on, use_stack is a boolean */
2615 use_stack = 0;
2616
2617 size *= sizeof(unsigned long);
86387f7e 2618
3e9a8aad
SRRH
2619 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2620 sizeof(*entry) + size, flags, pc);
3928a8a2 2621 if (!event)
4a9bd3f1
SR
2622 goto out;
2623 entry = ring_buffer_event_data(event);
86387f7e 2624
4a9bd3f1
SR
2625 memset(&entry->caller, 0, size);
2626
2627 if (use_stack)
2628 memcpy(&entry->caller, trace.entries,
2629 trace.nr_entries * sizeof(unsigned long));
2630 else {
2631 trace.max_entries = FTRACE_STACK_ENTRIES;
2632 trace.entries = entry->caller;
2633 if (regs)
2634 save_stack_trace_regs(regs, &trace);
2635 else
2636 save_stack_trace(&trace);
2637 }
2638
2639 entry->size = trace.nr_entries;
86387f7e 2640
f306cc82 2641 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2642 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
2643
2644 out:
2645 /* Again, don't let gcc optimize things here */
2646 barrier();
82146529 2647 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
2648 preempt_enable_notrace();
2649
f0a920d5
IM
2650}
2651
2d34f489
SRRH
2652static inline void ftrace_trace_stack(struct trace_array *tr,
2653 struct ring_buffer *buffer,
73dddbb5
SRRH
2654 unsigned long flags,
2655 int skip, int pc, struct pt_regs *regs)
53614991 2656{
2d34f489 2657 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
53614991
SR
2658 return;
2659
73dddbb5 2660 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
2661}
2662
c0a0d0d3
FW
2663void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2664 int pc)
38697053 2665{
a33d7d94
SRV
2666 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2667
2668 if (rcu_is_watching()) {
2669 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2670 return;
2671 }
2672
2673 /*
2674 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2675 * but if the above rcu_is_watching() failed, then the NMI
2676 * triggered someplace critical, and rcu_irq_enter() should
2677 * not be called from NMI.
2678 */
2679 if (unlikely(in_nmi()))
2680 return;
2681
2682 /*
2683 * It is possible that a function is being traced in a
2684 * location that RCU is not watching. A call to
2685 * rcu_irq_enter() will make sure that it is, but there's
2686 * a few internal rcu functions that could be traced
 2687 * where that won't work either. In those cases, we just
2688 * do nothing.
2689 */
2690 if (unlikely(rcu_irq_enter_disabled()))
2691 return;
2692
2693 rcu_irq_enter_irqson();
2694 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2695 rcu_irq_exit_irqson();
38697053
SR
2696}
2697
03889384
SR
2698/**
2699 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 2700 * @skip: Number of functions to skip (helper handlers)
03889384 2701 */
c142be8e 2702void trace_dump_stack(int skip)
03889384
SR
2703{
2704 unsigned long flags;
2705
2706 if (tracing_disabled || tracing_selftest_running)
e36c5458 2707 return;
03889384
SR
2708
2709 local_save_flags(flags);
2710
c142be8e
SRRH
2711 /*
 2712 * Skip 3 more, which seems to get us to the caller of
2713 * this function.
2714 */
2715 skip += 3;
2716 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2717 flags, skip, preempt_count(), NULL);
03889384
SR
2718}
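
/*
 * Illustrative sketch, not part of the original file: trace_dump_stack() is
 * typically dropped into a suspect code path while debugging; a non-zero
 * skip hides wrapper frames. The function name is hypothetical.
 */
#ifdef UNUSED
static void example_debug_hook(void)
{
	/* record who called us, skipping this helper frame */
	trace_dump_stack(1);
}
#endif /* UNUSED */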
2719
91e86e56
SR
2720static DEFINE_PER_CPU(int, user_stack_count);
2721
e77405ad
SR
2722void
2723ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 2724{
2425bcb9 2725 struct trace_event_call *call = &event_user_stack;
8d7c6a96 2726 struct ring_buffer_event *event;
02b67518
TE
2727 struct userstack_entry *entry;
2728 struct stack_trace trace;
02b67518 2729
983f938a 2730 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
02b67518
TE
2731 return;
2732
b6345879
SR
2733 /*
 2734 * NMIs cannot handle page faults, even with fixups.
 2735 * Saving the user stack can (and often does) fault.
2736 */
2737 if (unlikely(in_nmi()))
2738 return;
02b67518 2739
91e86e56
SR
2740 /*
2741 * prevent recursion, since the user stack tracing may
2742 * trigger other kernel events.
2743 */
2744 preempt_disable();
2745 if (__this_cpu_read(user_stack_count))
2746 goto out;
2747
2748 __this_cpu_inc(user_stack_count);
2749
3e9a8aad
SRRH
2750 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2751 sizeof(*entry), flags, pc);
02b67518 2752 if (!event)
1dbd1951 2753 goto out_drop_count;
02b67518 2754 entry = ring_buffer_event_data(event);
02b67518 2755
48659d31 2756 entry->tgid = current->tgid;
02b67518
TE
2757 memset(&entry->caller, 0, sizeof(entry->caller));
2758
2759 trace.nr_entries = 0;
2760 trace.max_entries = FTRACE_STACK_ENTRIES;
2761 trace.skip = 0;
2762 trace.entries = entry->caller;
2763
2764 save_stack_trace_user(&trace);
f306cc82 2765 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2766 __buffer_unlock_commit(buffer, event);
91e86e56 2767
1dbd1951 2768 out_drop_count:
91e86e56 2769 __this_cpu_dec(user_stack_count);
91e86e56
SR
2770 out:
2771 preempt_enable();
02b67518
TE
2772}
2773
4fd27358
HE
2774#ifdef UNUSED
2775static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 2776{
7be42151 2777 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 2778}
4fd27358 2779#endif /* UNUSED */
02b67518 2780
c0a0d0d3
FW
2781#endif /* CONFIG_STACKTRACE */
2782
07d777fe
SR
2783/* created for use with alloc_percpu */
2784struct trace_buffer_struct {
e2ace001
AL
2785 int nesting;
2786 char buffer[4][TRACE_BUF_SIZE];
07d777fe
SR
2787};
2788
2789static struct trace_buffer_struct *trace_percpu_buffer;
07d777fe
SR
2790
2791/*
e2ace001
AL
 2792 * This allows for lockless recording. If we're nested too deeply, then
2793 * this returns NULL.
07d777fe
SR
2794 */
2795static char *get_trace_buf(void)
2796{
e2ace001 2797 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
07d777fe 2798
e2ace001 2799 if (!buffer || buffer->nesting >= 4)
07d777fe
SR
2800 return NULL;
2801
e2ace001
AL
2802 return &buffer->buffer[buffer->nesting++][0];
2803}
2804
2805static void put_trace_buf(void)
2806{
2807 this_cpu_dec(trace_percpu_buffer->nesting);
07d777fe
SR
2808}
2809
2810static int alloc_percpu_trace_buffer(void)
2811{
2812 struct trace_buffer_struct *buffers;
07d777fe
SR
2813
2814 buffers = alloc_percpu(struct trace_buffer_struct);
e2ace001
AL
2815 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2816 return -ENOMEM;
07d777fe
SR
2817
2818 trace_percpu_buffer = buffers;
07d777fe 2819 return 0;
07d777fe
SR
2820}
2821
81698831
SR
2822static int buffers_allocated;
2823
07d777fe
SR
2824void trace_printk_init_buffers(void)
2825{
07d777fe
SR
2826 if (buffers_allocated)
2827 return;
2828
2829 if (alloc_percpu_trace_buffer())
2830 return;
2831
2184db46
SR
2832 /* trace_printk() is for debug use only. Don't use it in production. */
2833
a395d6a7
JP
2834 pr_warn("\n");
2835 pr_warn("**********************************************************\n");
2836 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2837 pr_warn("** **\n");
2838 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2839 pr_warn("** **\n");
2840 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2841 pr_warn("** unsafe for production use. **\n");
2842 pr_warn("** **\n");
2843 pr_warn("** If you see this message and you are not debugging **\n");
2844 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2845 pr_warn("** **\n");
2846 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2847 pr_warn("**********************************************************\n");
07d777fe 2848
b382ede6
SR
 2849 /* Expand the buffers to their configured size */
2850 tracing_update_buffers();
2851
07d777fe 2852 buffers_allocated = 1;
81698831
SR
2853
2854 /*
2855 * trace_printk_init_buffers() can be called by modules.
2856 * If that happens, then we need to start cmdline recording
2857 * directly here. If the global_trace.buffer is already
2858 * allocated here, then this was called by module code.
2859 */
12883efb 2860 if (global_trace.trace_buffer.buffer)
81698831
SR
2861 tracing_start_cmdline_record();
2862}
2863
2864void trace_printk_start_comm(void)
2865{
2866 /* Start tracing comms if trace printk is set */
2867 if (!buffers_allocated)
2868 return;
2869 tracing_start_cmdline_record();
2870}
2871
2872static void trace_printk_start_stop_comm(int enabled)
2873{
2874 if (!buffers_allocated)
2875 return;
2876
2877 if (enabled)
2878 tracing_start_cmdline_record();
2879 else
2880 tracing_stop_cmdline_record();
07d777fe
SR
2881}
2882
769b0441 2883/**
48ead020 2884 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
2885 *
2886 */
40ce74f1 2887int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2888{
2425bcb9 2889 struct trace_event_call *call = &event_bprint;
769b0441 2890 struct ring_buffer_event *event;
e77405ad 2891 struct ring_buffer *buffer;
769b0441 2892 struct trace_array *tr = &global_trace;
48ead020 2893 struct bprint_entry *entry;
769b0441 2894 unsigned long flags;
07d777fe
SR
2895 char *tbuffer;
2896 int len = 0, size, pc;
769b0441
FW
2897
2898 if (unlikely(tracing_selftest_running || tracing_disabled))
2899 return 0;
2900
2901 /* Don't pollute graph traces with trace_vprintk internals */
2902 pause_graph_tracing();
2903
2904 pc = preempt_count();
5168ae50 2905 preempt_disable_notrace();
769b0441 2906
07d777fe
SR
2907 tbuffer = get_trace_buf();
2908 if (!tbuffer) {
2909 len = 0;
e2ace001 2910 goto out_nobuffer;
07d777fe 2911 }
769b0441 2912
07d777fe 2913 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2914
07d777fe
SR
2915 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2916 goto out;
769b0441 2917
07d777fe 2918 local_save_flags(flags);
769b0441 2919 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2920 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
2921 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2922 flags, pc);
769b0441 2923 if (!event)
07d777fe 2924 goto out;
769b0441
FW
2925 entry = ring_buffer_event_data(event);
2926 entry->ip = ip;
769b0441
FW
2927 entry->fmt = fmt;
2928
07d777fe 2929 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2930 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2931 __buffer_unlock_commit(buffer, event);
2d34f489 2932 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
d931369b 2933 }
769b0441 2934
769b0441 2935out:
e2ace001
AL
2936 put_trace_buf();
2937
2938out_nobuffer:
5168ae50 2939 preempt_enable_notrace();
769b0441
FW
2940 unpause_graph_tracing();
2941
2942 return len;
2943}
48ead020
FW
2944EXPORT_SYMBOL_GPL(trace_vbprintk);
2945
12883efb
SRRH
2946static int
2947__trace_array_vprintk(struct ring_buffer *buffer,
2948 unsigned long ip, const char *fmt, va_list args)
48ead020 2949{
2425bcb9 2950 struct trace_event_call *call = &event_print;
48ead020 2951 struct ring_buffer_event *event;
07d777fe 2952 int len = 0, size, pc;
48ead020 2953 struct print_entry *entry;
07d777fe
SR
2954 unsigned long flags;
2955 char *tbuffer;
48ead020
FW
2956
2957 if (tracing_disabled || tracing_selftest_running)
2958 return 0;
2959
07d777fe
SR
2960 /* Don't pollute graph traces with trace_vprintk internals */
2961 pause_graph_tracing();
2962
48ead020
FW
2963 pc = preempt_count();
2964 preempt_disable_notrace();
48ead020 2965
07d777fe
SR
2966
2967 tbuffer = get_trace_buf();
2968 if (!tbuffer) {
2969 len = 0;
e2ace001 2970 goto out_nobuffer;
07d777fe 2971 }
48ead020 2972
3558a5ac 2973 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2974
07d777fe 2975 local_save_flags(flags);
48ead020 2976 size = sizeof(*entry) + len + 1;
3e9a8aad
SRRH
2977 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2978 flags, pc);
48ead020 2979 if (!event)
07d777fe 2980 goto out;
48ead020 2981 entry = ring_buffer_event_data(event);
c13d2f7c 2982 entry->ip = ip;
48ead020 2983
3558a5ac 2984 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 2985 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2986 __buffer_unlock_commit(buffer, event);
2d34f489 2987 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
d931369b 2988 }
e2ace001
AL
2989
2990out:
2991 put_trace_buf();
2992
2993out_nobuffer:
48ead020 2994 preempt_enable_notrace();
07d777fe 2995 unpause_graph_tracing();
48ead020
FW
2996
2997 return len;
2998}
659372d3 2999
12883efb
SRRH
3000int trace_array_vprintk(struct trace_array *tr,
3001 unsigned long ip, const char *fmt, va_list args)
3002{
3003 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3004}
3005
3006int trace_array_printk(struct trace_array *tr,
3007 unsigned long ip, const char *fmt, ...)
3008{
3009 int ret;
3010 va_list ap;
3011
983f938a 3012 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
3013 return 0;
3014
3015 va_start(ap, fmt);
3016 ret = trace_array_vprintk(tr, ip, fmt, ap);
3017 va_end(ap);
3018 return ret;
3019}
3020
3021int trace_array_printk_buf(struct ring_buffer *buffer,
3022 unsigned long ip, const char *fmt, ...)
3023{
3024 int ret;
3025 va_list ap;
3026
983f938a 3027 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
3028 return 0;
3029
3030 va_start(ap, fmt);
3031 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3032 va_end(ap);
3033 return ret;
3034}
3035
659372d3
SR
3036int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3037{
a813a159 3038 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 3039}
769b0441
FW
3040EXPORT_SYMBOL_GPL(trace_vprintk);
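
/*
 * Illustrative sketch, not part of the original file: the usual entry point
 * into the code above is the trace_printk() macro, which routes to
 * trace_vbprintk() or trace_vprintk() depending on the format string. The
 * function name is hypothetical.
 */
#ifdef UNUSED
static void example_debug_event(int cpu, u64 delta_ns)
{
	trace_printk("cpu %d saw a delta of %llu ns\n",
		     cpu, (unsigned long long)delta_ns);
}
#endif /* UNUSED */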
3041
e2ac8ef5 3042static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 3043{
6d158a81
SR
3044 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3045
5a90f577 3046 iter->idx++;
6d158a81
SR
3047 if (buf_iter)
3048 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
3049}
3050
e309b41d 3051static struct trace_entry *
bc21b478
SR
3052peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3053 unsigned long *lost_events)
dd0e545f 3054{
3928a8a2 3055 struct ring_buffer_event *event;
6d158a81 3056 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 3057
d769041f
SR
3058 if (buf_iter)
3059 event = ring_buffer_iter_peek(buf_iter, ts);
3060 else
12883efb 3061 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 3062 lost_events);
d769041f 3063
4a9bd3f1
SR
3064 if (event) {
3065 iter->ent_size = ring_buffer_event_length(event);
3066 return ring_buffer_event_data(event);
3067 }
3068 iter->ent_size = 0;
3069 return NULL;
dd0e545f 3070}
d769041f 3071
dd0e545f 3072static struct trace_entry *
bc21b478
SR
3073__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3074 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 3075{
12883efb 3076 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 3077 struct trace_entry *ent, *next = NULL;
aa27497c 3078 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 3079 int cpu_file = iter->cpu_file;
3928a8a2 3080 u64 next_ts = 0, ts;
bc0c38d1 3081 int next_cpu = -1;
12b5da34 3082 int next_size = 0;
bc0c38d1
SR
3083 int cpu;
3084
b04cc6b1
FW
3085 /*
3086 * If we are in a per_cpu trace file, don't bother by iterating over
3087 * all cpu and peek directly.
3088 */
ae3b5093 3089 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
3090 if (ring_buffer_empty_cpu(buffer, cpu_file))
3091 return NULL;
bc21b478 3092 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
3093 if (ent_cpu)
3094 *ent_cpu = cpu_file;
3095
3096 return ent;
3097 }
3098
ab46428c 3099 for_each_tracing_cpu(cpu) {
dd0e545f 3100
3928a8a2
SR
3101 if (ring_buffer_empty_cpu(buffer, cpu))
3102 continue;
dd0e545f 3103
bc21b478 3104 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 3105
cdd31cd2
IM
3106 /*
3107 * Pick the entry with the smallest timestamp:
3108 */
3928a8a2 3109 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
3110 next = ent;
3111 next_cpu = cpu;
3928a8a2 3112 next_ts = ts;
bc21b478 3113 next_lost = lost_events;
12b5da34 3114 next_size = iter->ent_size;
bc0c38d1
SR
3115 }
3116 }
3117
12b5da34
SR
3118 iter->ent_size = next_size;
3119
bc0c38d1
SR
3120 if (ent_cpu)
3121 *ent_cpu = next_cpu;
3122
3928a8a2
SR
3123 if (ent_ts)
3124 *ent_ts = next_ts;
3125
bc21b478
SR
3126 if (missing_events)
3127 *missing_events = next_lost;
3128
bc0c38d1
SR
3129 return next;
3130}
3131
dd0e545f 3132/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
3133struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3134 int *ent_cpu, u64 *ent_ts)
bc0c38d1 3135{
bc21b478 3136 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
3137}
3138
3139/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 3140void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 3141{
bc21b478
SR
3142 iter->ent = __find_next_entry(iter, &iter->cpu,
3143 &iter->lost_events, &iter->ts);
dd0e545f 3144
3928a8a2 3145 if (iter->ent)
e2ac8ef5 3146 trace_iterator_increment(iter);
dd0e545f 3147
3928a8a2 3148 return iter->ent ? iter : NULL;
b3806b43 3149}
bc0c38d1 3150
e309b41d 3151static void trace_consume(struct trace_iterator *iter)
b3806b43 3152{
12883efb 3153 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 3154 &iter->lost_events);
bc0c38d1
SR
3155}
3156
e309b41d 3157static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
3158{
3159 struct trace_iterator *iter = m->private;
bc0c38d1 3160 int i = (int)*pos;
4e3c3333 3161 void *ent;
bc0c38d1 3162
a63ce5b3
SR
3163 WARN_ON_ONCE(iter->leftover);
3164
bc0c38d1
SR
3165 (*pos)++;
3166
3167 /* can't go backwards */
3168 if (iter->idx > i)
3169 return NULL;
3170
3171 if (iter->idx < 0)
955b61e5 3172 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3173 else
3174 ent = iter;
3175
3176 while (ent && iter->idx < i)
955b61e5 3177 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3178
3179 iter->pos = *pos;
3180
bc0c38d1
SR
3181 return ent;
3182}
3183
955b61e5 3184void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 3185{
2f26ebd5
SR
3186 struct ring_buffer_event *event;
3187 struct ring_buffer_iter *buf_iter;
3188 unsigned long entries = 0;
3189 u64 ts;
3190
12883efb 3191 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 3192
6d158a81
SR
3193 buf_iter = trace_buffer_iter(iter, cpu);
3194 if (!buf_iter)
2f26ebd5
SR
3195 return;
3196
2f26ebd5
SR
3197 ring_buffer_iter_reset(buf_iter);
3198
3199 /*
 3200 * With the max latency tracers, it is possible that a reset
 3201 * never took place on a cpu. This is evident when the
 3202 * timestamp is before the start of the buffer.
3203 */
3204 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 3205 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
3206 break;
3207 entries++;
3208 ring_buffer_read(buf_iter, NULL);
3209 }
3210
12883efb 3211 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
3212}
3213
d7350c3f 3214/*
d7350c3f
FW
 3215 * The current tracer is copied to avoid taking a global lock
 3216 * all around.
3217 */
bc0c38d1
SR
3218static void *s_start(struct seq_file *m, loff_t *pos)
3219{
3220 struct trace_iterator *iter = m->private;
2b6080f2 3221 struct trace_array *tr = iter->tr;
b04cc6b1 3222 int cpu_file = iter->cpu_file;
bc0c38d1
SR
3223 void *p = NULL;
3224 loff_t l = 0;
3928a8a2 3225 int cpu;
bc0c38d1 3226
2fd196ec
HT
3227 /*
3228 * copy the tracer to avoid using a global lock all around.
3229 * iter->trace is a copy of current_trace, the pointer to the
3230 * name may be used instead of a strcmp(), as iter->trace->name
3231 * will point to the same string as current_trace->name.
3232 */
bc0c38d1 3233 mutex_lock(&trace_types_lock);
2b6080f2
SR
3234 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3235 *iter->trace = *tr->current_trace;
d7350c3f 3236 mutex_unlock(&trace_types_lock);
bc0c38d1 3237
12883efb 3238#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3239 if (iter->snapshot && iter->trace->use_max_tr)
3240 return ERR_PTR(-EBUSY);
12883efb 3241#endif
debdd57f
HT
3242
3243 if (!iter->snapshot)
d914ba37 3244 atomic_inc(&trace_record_taskinfo_disabled);
bc0c38d1 3245
bc0c38d1
SR
3246 if (*pos != iter->pos) {
3247 iter->ent = NULL;
3248 iter->cpu = 0;
3249 iter->idx = -1;
3250
ae3b5093 3251 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3252 for_each_tracing_cpu(cpu)
2f26ebd5 3253 tracing_iter_reset(iter, cpu);
b04cc6b1 3254 } else
2f26ebd5 3255 tracing_iter_reset(iter, cpu_file);
bc0c38d1 3256
ac91d854 3257 iter->leftover = 0;
bc0c38d1
SR
3258 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3259 ;
3260
3261 } else {
a63ce5b3
SR
3262 /*
3263 * If we overflowed the seq_file before, then we want
3264 * to just reuse the trace_seq buffer again.
3265 */
3266 if (iter->leftover)
3267 p = iter;
3268 else {
3269 l = *pos - 1;
3270 p = s_next(m, p, &l);
3271 }
bc0c38d1
SR
3272 }
3273
4f535968 3274 trace_event_read_lock();
7e53bd42 3275 trace_access_lock(cpu_file);
bc0c38d1
SR
3276 return p;
3277}
3278
3279static void s_stop(struct seq_file *m, void *p)
3280{
7e53bd42
LJ
3281 struct trace_iterator *iter = m->private;
3282
12883efb 3283#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3284 if (iter->snapshot && iter->trace->use_max_tr)
3285 return;
12883efb 3286#endif
debdd57f
HT
3287
3288 if (!iter->snapshot)
d914ba37 3289 atomic_dec(&trace_record_taskinfo_disabled);
12883efb 3290
7e53bd42 3291 trace_access_unlock(iter->cpu_file);
4f535968 3292 trace_event_read_unlock();
bc0c38d1
SR
3293}
3294
39eaf7ef 3295static void
12883efb
SRRH
3296get_total_entries(struct trace_buffer *buf,
3297 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
3298{
3299 unsigned long count;
3300 int cpu;
3301
3302 *total = 0;
3303 *entries = 0;
3304
3305 for_each_tracing_cpu(cpu) {
12883efb 3306 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
3307 /*
3308 * If this buffer has skipped entries, then we hold all
3309 * entries for the trace and we need to ignore the
3310 * ones before the time stamp.
3311 */
12883efb
SRRH
3312 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3313 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
3314 /* total is the same as the entries */
3315 *total += count;
3316 } else
3317 *total += count +
12883efb 3318 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
3319 *entries += count;
3320 }
3321}
3322
e309b41d 3323static void print_lat_help_header(struct seq_file *m)
bc0c38d1 3324{
d79ac28f
RV
3325 seq_puts(m, "# _------=> CPU# \n"
3326 "# / _-----=> irqs-off \n"
3327 "# | / _----=> need-resched \n"
3328 "# || / _---=> hardirq/softirq \n"
3329 "# ||| / _--=> preempt-depth \n"
3330 "# |||| / delay \n"
3331 "# cmd pid ||||| time | caller \n"
3332 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
3333}
3334
12883efb 3335static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 3336{
39eaf7ef
SR
3337 unsigned long total;
3338 unsigned long entries;
3339
12883efb 3340 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
3341 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3342 entries, total, num_online_cpus());
3343 seq_puts(m, "#\n");
3344}
3345
441dae8f
JF
3346static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3347 unsigned int flags)
39eaf7ef 3348{
441dae8f
JF
3349 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3350
12883efb 3351 print_event_info(buf, m);
441dae8f
JF
3352
3353 seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3354 seq_printf(m, "# | | | %s | |\n", tgid ? " | " : "");
bc0c38d1
SR
3355}
3356
441dae8f
JF
3357static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3358 unsigned int flags)
77271ce4 3359{
441dae8f 3360 bool tgid = flags & TRACE_ITER_RECORD_TGID;
b11fb737
SRV
3361 const char tgid_space[] = " ";
3362 const char space[] = " ";
3363
3364 seq_printf(m, "# %s _-----=> irqs-off\n",
3365 tgid ? tgid_space : space);
3366 seq_printf(m, "# %s / _----=> need-resched\n",
3367 tgid ? tgid_space : space);
3368 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3369 tgid ? tgid_space : space);
3370 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3371 tgid ? tgid_space : space);
3372 seq_printf(m, "# %s||| / delay\n",
3373 tgid ? tgid_space : space);
3374 seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n",
3375 tgid ? " TGID " : space);
3376 seq_printf(m, "# | | | %s|||| | |\n",
3377 tgid ? " | " : space);
77271ce4 3378}
bc0c38d1 3379
62b915f1 3380void
bc0c38d1
SR
3381print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3382{
983f938a 3383 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
3384 struct trace_buffer *buf = iter->trace_buffer;
3385 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 3386 struct tracer *type = iter->trace;
39eaf7ef
SR
3387 unsigned long entries;
3388 unsigned long total;
bc0c38d1
SR
3389 const char *name = "preemption";
3390
d840f718 3391 name = type->name;
bc0c38d1 3392
12883efb 3393 get_total_entries(buf, &total, &entries);
bc0c38d1 3394
888b55dc 3395 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 3396 name, UTS_RELEASE);
888b55dc 3397 seq_puts(m, "# -----------------------------------"
bc0c38d1 3398 "---------------------------------\n");
888b55dc 3399 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 3400 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 3401 nsecs_to_usecs(data->saved_latency),
bc0c38d1 3402 entries,
4c11d7ae 3403 total,
12883efb 3404 buf->cpu,
bc0c38d1
SR
3405#if defined(CONFIG_PREEMPT_NONE)
3406 "server",
3407#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3408 "desktop",
b5c21b45 3409#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
3410 "preempt",
3411#else
3412 "unknown",
3413#endif
3414 /* These are reserved for later use */
3415 0, 0, 0, 0);
3416#ifdef CONFIG_SMP
3417 seq_printf(m, " #P:%d)\n", num_online_cpus());
3418#else
3419 seq_puts(m, ")\n");
3420#endif
888b55dc
KM
3421 seq_puts(m, "# -----------------\n");
3422 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 3423 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
3424 data->comm, data->pid,
3425 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 3426 data->policy, data->rt_priority);
888b55dc 3427 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
3428
3429 if (data->critical_start) {
888b55dc 3430 seq_puts(m, "# => started at: ");
214023c3
SR
3431 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3432 trace_print_seq(m, &iter->seq);
888b55dc 3433 seq_puts(m, "\n# => ended at: ");
214023c3
SR
3434 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3435 trace_print_seq(m, &iter->seq);
8248ac05 3436 seq_puts(m, "\n#\n");
bc0c38d1
SR
3437 }
3438
888b55dc 3439 seq_puts(m, "#\n");
bc0c38d1
SR
3440}
3441
a309720c
SR
3442static void test_cpu_buff_start(struct trace_iterator *iter)
3443{
3444 struct trace_seq *s = &iter->seq;
983f938a 3445 struct trace_array *tr = iter->tr;
a309720c 3446
983f938a 3447 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
12ef7d44
SR
3448 return;
3449
3450 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3451 return;
3452
4dbbe2d8
MK
3453 if (cpumask_available(iter->started) &&
3454 cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
3455 return;
3456
12883efb 3457 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
3458 return;
3459
4dbbe2d8 3460 if (cpumask_available(iter->started))
919cd979 3461 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
3462
3463 /* Don't print started cpu buffer for the first entry of the trace */
3464 if (iter->idx > 1)
3465 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3466 iter->cpu);
a309720c
SR
3467}
3468
2c4f035f 3469static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 3470{
983f938a 3471 struct trace_array *tr = iter->tr;
214023c3 3472 struct trace_seq *s = &iter->seq;
983f938a 3473 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 3474 struct trace_entry *entry;
f633cef0 3475 struct trace_event *event;
bc0c38d1 3476
4e3c3333 3477 entry = iter->ent;
dd0e545f 3478
a309720c
SR
3479 test_cpu_buff_start(iter);
3480
c4a8e8be 3481 event = ftrace_find_event(entry->type);
bc0c38d1 3482
983f938a 3483 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3484 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3485 trace_print_lat_context(iter);
3486 else
3487 trace_print_context(iter);
c4a8e8be 3488 }
bc0c38d1 3489
19a7fe20
SRRH
3490 if (trace_seq_has_overflowed(s))
3491 return TRACE_TYPE_PARTIAL_LINE;
3492
268ccda0 3493 if (event)
a9a57763 3494 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 3495
19a7fe20 3496 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 3497
19a7fe20 3498 return trace_handle_return(s);
bc0c38d1
SR
3499}
3500
2c4f035f 3501static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3 3502{
983f938a 3503 struct trace_array *tr = iter->tr;
f9896bf3
IM
3504 struct trace_seq *s = &iter->seq;
3505 struct trace_entry *entry;
f633cef0 3506 struct trace_event *event;
f9896bf3
IM
3507
3508 entry = iter->ent;
dd0e545f 3509
983f938a 3510 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
19a7fe20
SRRH
3511 trace_seq_printf(s, "%d %d %llu ",
3512 entry->pid, iter->cpu, iter->ts);
3513
3514 if (trace_seq_has_overflowed(s))
3515 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 3516
f633cef0 3517 event = ftrace_find_event(entry->type);
268ccda0 3518 if (event)
a9a57763 3519 return event->funcs->raw(iter, 0, event);
d9793bd8 3520
19a7fe20 3521 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 3522
19a7fe20 3523 return trace_handle_return(s);
f9896bf3
IM
3524}
3525
2c4f035f 3526static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec 3527{
983f938a 3528 struct trace_array *tr = iter->tr;
5e3ca0ec
IM
3529 struct trace_seq *s = &iter->seq;
3530 unsigned char newline = '\n';
3531 struct trace_entry *entry;
f633cef0 3532 struct trace_event *event;
5e3ca0ec
IM
3533
3534 entry = iter->ent;
dd0e545f 3535
983f938a 3536 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3537 SEQ_PUT_HEX_FIELD(s, entry->pid);
3538 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3539 SEQ_PUT_HEX_FIELD(s, iter->ts);
3540 if (trace_seq_has_overflowed(s))
3541 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3542 }
5e3ca0ec 3543
f633cef0 3544 event = ftrace_find_event(entry->type);
268ccda0 3545 if (event) {
a9a57763 3546 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
3547 if (ret != TRACE_TYPE_HANDLED)
3548 return ret;
3549 }
7104f300 3550
19a7fe20 3551 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 3552
19a7fe20 3553 return trace_handle_return(s);
5e3ca0ec
IM
3554}
3555
2c4f035f 3556static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa 3557{
983f938a 3558 struct trace_array *tr = iter->tr;
cb0f12aa
IM
3559 struct trace_seq *s = &iter->seq;
3560 struct trace_entry *entry;
f633cef0 3561 struct trace_event *event;
cb0f12aa
IM
3562
3563 entry = iter->ent;
dd0e545f 3564
983f938a 3565 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3566 SEQ_PUT_FIELD(s, entry->pid);
3567 SEQ_PUT_FIELD(s, iter->cpu);
3568 SEQ_PUT_FIELD(s, iter->ts);
3569 if (trace_seq_has_overflowed(s))
3570 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3571 }
cb0f12aa 3572
f633cef0 3573 event = ftrace_find_event(entry->type);
a9a57763
SR
3574 return event ? event->funcs->binary(iter, 0, event) :
3575 TRACE_TYPE_HANDLED;
cb0f12aa
IM
3576}
3577
62b915f1 3578int trace_empty(struct trace_iterator *iter)
bc0c38d1 3579{
6d158a81 3580 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
3581 int cpu;
3582
9aba60fe 3583 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 3584 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 3585 cpu = iter->cpu_file;
6d158a81
SR
3586 buf_iter = trace_buffer_iter(iter, cpu);
3587 if (buf_iter) {
3588 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
3589 return 0;
3590 } else {
12883efb 3591 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
3592 return 0;
3593 }
3594 return 1;
3595 }
3596
ab46428c 3597 for_each_tracing_cpu(cpu) {
6d158a81
SR
3598 buf_iter = trace_buffer_iter(iter, cpu);
3599 if (buf_iter) {
3600 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
3601 return 0;
3602 } else {
12883efb 3603 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
3604 return 0;
3605 }
bc0c38d1 3606 }
d769041f 3607
797d3712 3608 return 1;
bc0c38d1
SR
3609}
3610
4f535968 3611/* Called with trace_event_read_lock() held. */
955b61e5 3612enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 3613{
983f938a
SRRH
3614 struct trace_array *tr = iter->tr;
3615 unsigned long trace_flags = tr->trace_flags;
2c4f035f
FW
3616 enum print_line_t ret;
3617
19a7fe20
SRRH
3618 if (iter->lost_events) {
3619 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3620 iter->cpu, iter->lost_events);
3621 if (trace_seq_has_overflowed(&iter->seq))
3622 return TRACE_TYPE_PARTIAL_LINE;
3623 }
bc21b478 3624
2c4f035f
FW
3625 if (iter->trace && iter->trace->print_line) {
3626 ret = iter->trace->print_line(iter);
3627 if (ret != TRACE_TYPE_UNHANDLED)
3628 return ret;
3629 }
72829bc3 3630
09ae7234
SRRH
3631 if (iter->ent->type == TRACE_BPUTS &&
3632 trace_flags & TRACE_ITER_PRINTK &&
3633 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3634 return trace_print_bputs_msg_only(iter);
3635
48ead020
FW
3636 if (iter->ent->type == TRACE_BPRINT &&
3637 trace_flags & TRACE_ITER_PRINTK &&
3638 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3639 return trace_print_bprintk_msg_only(iter);
48ead020 3640
66896a85
FW
3641 if (iter->ent->type == TRACE_PRINT &&
3642 trace_flags & TRACE_ITER_PRINTK &&
3643 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3644 return trace_print_printk_msg_only(iter);
66896a85 3645
cb0f12aa
IM
3646 if (trace_flags & TRACE_ITER_BIN)
3647 return print_bin_fmt(iter);
3648
5e3ca0ec
IM
3649 if (trace_flags & TRACE_ITER_HEX)
3650 return print_hex_fmt(iter);
3651
f9896bf3
IM
3652 if (trace_flags & TRACE_ITER_RAW)
3653 return print_raw_fmt(iter);
3654
f9896bf3
IM
3655 return print_trace_fmt(iter);
3656}
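
The three helpers above (print_raw_fmt(), print_hex_fmt(), print_bin_fmt()) all resolve the event via ftrace_find_event() and then dispatch to a per-event callback through event->funcs. The following is a minimal sketch, not taken from this file, of what such a callback and its callback table might look like, assuming the struct trace_event_functions layout implied by the ->raw/->hex/->binary calls above; all names prefixed with my_ are invented for illustration.

        #include <linux/trace_events.h>

        /* Illustrative per-event output callback of the kind
         * print_raw_fmt() dispatches to via event->funcs->raw(). */
        static enum print_line_t my_event_raw(struct trace_iterator *iter, int flags,
                                              struct trace_event *event)
        {
                struct trace_seq *s = &iter->seq;

                /* Emit whatever fields the event defines; here only the type id. */
                trace_seq_printf(s, "my_event type=%d\n", iter->ent->type);

                /* Maps a trace_seq overflow to TRACE_TYPE_PARTIAL_LINE. */
                return trace_handle_return(s);
        }

        static struct trace_event_functions my_event_funcs = {
                .trace  = my_event_raw,         /* default "trace" output */
                .raw    = my_event_raw,         /* used when the 'raw' option is set */
        };

The sketch leans on trace_handle_return() the same way the format helpers above do, so a trace_seq overflow is reported as a partial line.
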
3657
7e9a49ef
JO
3658void trace_latency_header(struct seq_file *m)
3659{
3660 struct trace_iterator *iter = m->private;
983f938a 3661 struct trace_array *tr = iter->tr;
7e9a49ef
JO
3662
3663 /* print nothing if the buffers are empty */
3664 if (trace_empty(iter))
3665 return;
3666
3667 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3668 print_trace_header(m, iter);
3669
983f938a 3670 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
7e9a49ef
JO
3671 print_lat_help_header(m);
3672}
3673
62b915f1
JO
3674void trace_default_header(struct seq_file *m)
3675{
3676 struct trace_iterator *iter = m->private;
983f938a
SRRH
3677 struct trace_array *tr = iter->tr;
3678 unsigned long trace_flags = tr->trace_flags;
62b915f1 3679
f56e7f8e
JO
3680 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3681 return;
3682
62b915f1
JO
3683 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3684 /* print nothing if the buffers are empty */
3685 if (trace_empty(iter))
3686 return;
3687 print_trace_header(m, iter);
3688 if (!(trace_flags & TRACE_ITER_VERBOSE))
3689 print_lat_help_header(m);
3690 } else {
77271ce4
SR
3691 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3692 if (trace_flags & TRACE_ITER_IRQ_INFO)
441dae8f
JF
3693 print_func_help_header_irq(iter->trace_buffer,
3694 m, trace_flags);
77271ce4 3695 else
441dae8f
JF
3696 print_func_help_header(iter->trace_buffer, m,
3697 trace_flags);
77271ce4 3698 }
62b915f1
JO
3699 }
3700}
3701
e0a413f6
SR
3702static void test_ftrace_alive(struct seq_file *m)
3703{
3704 if (!ftrace_is_dead())
3705 return;
d79ac28f
RV
3706 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3707 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
3708}
3709
d8741e2e 3710#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 3711static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 3712{
d79ac28f
RV
3713 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3714 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3715 "# Takes a snapshot of the main buffer.\n"
3716 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3717 "# (Doesn't have to be '2' works with any number that\n"
3718 "# is not a '0' or '1')\n");
d8741e2e 3719}
f1affcaa
SRRH
3720
3721static void show_snapshot_percpu_help(struct seq_file *m)
3722{
fa6f0cc7 3723 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 3724#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
3725 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3726 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 3727#else
d79ac28f
RV
3728 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3729 "# Must use main snapshot file to allocate.\n");
f1affcaa 3730#endif
d79ac28f
RV
3731 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3732 "# (Doesn't have to be '2' works with any number that\n"
3733 "# is not a '0' or '1')\n");
f1affcaa
SRRH
3734}
3735
d8741e2e
SRRH
3736static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3737{
45ad21ca 3738 if (iter->tr->allocated_snapshot)
fa6f0cc7 3739 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 3740 else
fa6f0cc7 3741 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 3742
fa6f0cc7 3743 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
3744 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3745 show_snapshot_main_help(m);
3746 else
3747 show_snapshot_percpu_help(m);
d8741e2e
SRRH
3748}
3749#else
3750/* Should never be called */
3751static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3752#endif
3753
bc0c38d1
SR
3754static int s_show(struct seq_file *m, void *v)
3755{
3756 struct trace_iterator *iter = v;
a63ce5b3 3757 int ret;
bc0c38d1
SR
3758
3759 if (iter->ent == NULL) {
3760 if (iter->tr) {
3761 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3762 seq_puts(m, "#\n");
e0a413f6 3763 test_ftrace_alive(m);
bc0c38d1 3764 }
d8741e2e
SRRH
3765 if (iter->snapshot && trace_empty(iter))
3766 print_snapshot_help(m, iter);
3767 else if (iter->trace && iter->trace->print_header)
8bba1bf5 3768 iter->trace->print_header(m);
62b915f1
JO
3769 else
3770 trace_default_header(m);
3771
a63ce5b3
SR
3772 } else if (iter->leftover) {
3773 /*
3774 * If we filled the seq_file buffer earlier, we
3775 * want to just show it now.
3776 */
3777 ret = trace_print_seq(m, &iter->seq);
3778
3779 /* ret should this time be zero, but you never know */
3780 iter->leftover = ret;
3781
bc0c38d1 3782 } else {
f9896bf3 3783 print_trace_line(iter);
a63ce5b3
SR
3784 ret = trace_print_seq(m, &iter->seq);
3785 /*
3786 * If we overflow the seq_file buffer, then it will
3787 * ask us for this data again at start up.
3788 * Use that instead.
3789 * ret is 0 if seq_file write succeeded.
3790 * -1 otherwise.
3791 */
3792 iter->leftover = ret;
bc0c38d1
SR
3793 }
3794
3795 return 0;
3796}
3797
649e9c70
ON
3798/*
3799 * Should be used after trace_array_get(), trace_types_lock
3800 * ensures that i_cdev was already initialized.
3801 */
3802static inline int tracing_get_cpu(struct inode *inode)
3803{
3804 if (inode->i_cdev) /* See trace_create_cpu_file() */
3805 return (long)inode->i_cdev - 1;
3806 return RING_BUFFER_ALL_CPUS;
3807}
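
The decode above ((long)inode->i_cdev - 1) only works because the per-cpu trace files store cpu + 1 in i_cdev when they are created, leaving a NULL i_cdev to mean RING_BUFFER_ALL_CPUS. A one-line sketch of the encoding side; tracing_set_cpu() is a hypothetical helper standing in for the trace_create_cpu_file() mentioned in the comment.

        #include <linux/fs.h>

        /* Hypothetical helper, the inverse of tracing_get_cpu(). */
        static void tracing_set_cpu(struct inode *inode, long cpu)
        {
                /* Store cpu + 1 so that cpu 0 is distinguishable from "not set". */
                inode->i_cdev = (void *)(cpu + 1);
        }
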
3808
88e9d34c 3809static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3810 .start = s_start,
3811 .next = s_next,
3812 .stop = s_stop,
3813 .show = s_show,
bc0c38d1
SR
3814};
3815
e309b41d 3816static struct trace_iterator *
6484c71c 3817__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3818{
6484c71c 3819 struct trace_array *tr = inode->i_private;
bc0c38d1 3820 struct trace_iterator *iter;
50e18b94 3821 int cpu;
bc0c38d1 3822
85a2f9b4
SR
3823 if (tracing_disabled)
3824 return ERR_PTR(-ENODEV);
60a11774 3825
50e18b94 3826 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3827 if (!iter)
3828 return ERR_PTR(-ENOMEM);
bc0c38d1 3829
72917235 3830 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 3831 GFP_KERNEL);
93574fcc
DC
3832 if (!iter->buffer_iter)
3833 goto release;
3834
d7350c3f
FW
3835 /*
3836 * We make a copy of the current tracer to avoid concurrent
3837 * changes on it while we are reading.
3838 */
bc0c38d1 3839 mutex_lock(&trace_types_lock);
d7350c3f 3840 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3841 if (!iter->trace)
d7350c3f 3842 goto fail;
85a2f9b4 3843
2b6080f2 3844 *iter->trace = *tr->current_trace;
d7350c3f 3845
79f55997 3846 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3847 goto fail;
3848
12883efb
SRRH
3849 iter->tr = tr;
3850
3851#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3852 /* Currently only the top directory has a snapshot */
3853 if (tr->current_trace->print_max || snapshot)
12883efb 3854 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3855 else
12883efb
SRRH
3856#endif
3857 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3858 iter->snapshot = snapshot;
bc0c38d1 3859 iter->pos = -1;
6484c71c 3860 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3861 mutex_init(&iter->mutex);
bc0c38d1 3862
8bba1bf5
MM
3863 /* Notify the tracer early; before we stop tracing. */
3864 if (iter->trace && iter->trace->open)
a93751ca 3865 iter->trace->open(iter);
8bba1bf5 3866
12ef7d44 3867 /* Annotate start of buffers if we had overruns */
12883efb 3868 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3869 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3870
8be0709f 3871 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3872 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3873 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3874
debdd57f
HT
3875 /* stop the trace while dumping if we are not opening "snapshot" */
3876 if (!iter->snapshot)
2b6080f2 3877 tracing_stop_tr(tr);
2f26ebd5 3878
ae3b5093 3879 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3880 for_each_tracing_cpu(cpu) {
b04cc6b1 3881 iter->buffer_iter[cpu] =
12883efb 3882 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3883 }
3884 ring_buffer_read_prepare_sync();
3885 for_each_tracing_cpu(cpu) {
3886 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3887 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3888 }
3889 } else {
3890 cpu = iter->cpu_file;
3928a8a2 3891 iter->buffer_iter[cpu] =
12883efb 3892 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3893 ring_buffer_read_prepare_sync();
3894 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3895 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3896 }
3897
bc0c38d1
SR
3898 mutex_unlock(&trace_types_lock);
3899
bc0c38d1 3900 return iter;
3928a8a2 3901
d7350c3f 3902 fail:
3928a8a2 3903 mutex_unlock(&trace_types_lock);
d7350c3f 3904 kfree(iter->trace);
6d158a81 3905 kfree(iter->buffer_iter);
93574fcc 3906release:
50e18b94
JO
3907 seq_release_private(inode, file);
3908 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3909}
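
__tracing_open() pairs __seq_open_private() with the seq_release_private() calls used on its error path and later in tracing_release(): the iterator is allocated together with the seq_file and freed the same way. Below is a self-contained sketch of that pairing; every name in it is invented for illustration and none of it comes from trace.c.

        #include <linux/fs.h>
        #include <linux/seq_file.h>

        struct my_iter {
                void *cursor;
        };

        static void *my_start(struct seq_file *m, loff_t *pos) { return NULL; }
        static void *my_next(struct seq_file *m, void *v, loff_t *pos) { return NULL; }
        static void my_stop(struct seq_file *m, void *v) { }
        static int my_show(struct seq_file *m, void *v) { return 0; }

        static const struct seq_operations my_seq_ops = {
                .start = my_start,
                .next  = my_next,
                .stop  = my_stop,
                .show  = my_show,
        };

        static int my_open(struct inode *inode, struct file *file)
        {
                struct my_iter *it;

                /* Allocates the iterator and wires it up as seq_file->private. */
                it = __seq_open_private(file, &my_seq_ops, sizeof(*it));
                if (!it)
                        return -ENOMEM;
                it->cursor = inode->i_private;
                return 0;
        }

        static int my_release(struct inode *inode, struct file *file)
        {
                /* Releases the seq_file and frees the private iterator with it. */
                return seq_release_private(inode, file);
        }
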
3910
3911int tracing_open_generic(struct inode *inode, struct file *filp)
3912{
60a11774
SR
3913 if (tracing_disabled)
3914 return -ENODEV;
3915
bc0c38d1
SR
3916 filp->private_data = inode->i_private;
3917 return 0;
3918}
3919
2e86421d
GB
3920bool tracing_is_disabled(void)
3921{
 3922	return (tracing_disabled) ? true : false;
3923}
3924
7b85af63
SRRH
3925/*
3926 * Open and update trace_array ref count.
3927 * Must have the current trace_array passed to it.
3928 */
dcc30223 3929static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3930{
3931 struct trace_array *tr = inode->i_private;
3932
3933 if (tracing_disabled)
3934 return -ENODEV;
3935
3936 if (trace_array_get(tr) < 0)
3937 return -ENODEV;
3938
3939 filp->private_data = inode->i_private;
3940
3941 return 0;
7b85af63
SRRH
3942}
3943
4fd27358 3944static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3945{
6484c71c 3946 struct trace_array *tr = inode->i_private;
907f2784 3947 struct seq_file *m = file->private_data;
4acd4d00 3948 struct trace_iterator *iter;
3928a8a2 3949 int cpu;
bc0c38d1 3950
ff451961 3951 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3952 trace_array_put(tr);
4acd4d00 3953 return 0;
ff451961 3954 }
4acd4d00 3955
6484c71c 3956 /* Writes do not use seq_file */
4acd4d00 3957 iter = m->private;
bc0c38d1 3958 mutex_lock(&trace_types_lock);
a695cb58 3959
3928a8a2
SR
3960 for_each_tracing_cpu(cpu) {
3961 if (iter->buffer_iter[cpu])
3962 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3963 }
3964
bc0c38d1
SR
3965 if (iter->trace && iter->trace->close)
3966 iter->trace->close(iter);
3967
debdd57f
HT
3968 if (!iter->snapshot)
3969 /* reenable tracing if it was previously enabled */
2b6080f2 3970 tracing_start_tr(tr);
f77d09a3
AL
3971
3972 __trace_array_put(tr);
3973
bc0c38d1
SR
3974 mutex_unlock(&trace_types_lock);
3975
d7350c3f 3976 mutex_destroy(&iter->mutex);
b0dfa978 3977 free_cpumask_var(iter->started);
d7350c3f 3978 kfree(iter->trace);
6d158a81 3979 kfree(iter->buffer_iter);
50e18b94 3980 seq_release_private(inode, file);
ff451961 3981
bc0c38d1
SR
3982 return 0;
3983}
3984
7b85af63
SRRH
3985static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3986{
3987 struct trace_array *tr = inode->i_private;
3988
3989 trace_array_put(tr);
bc0c38d1
SR
3990 return 0;
3991}
3992
7b85af63
SRRH
3993static int tracing_single_release_tr(struct inode *inode, struct file *file)
3994{
3995 struct trace_array *tr = inode->i_private;
3996
3997 trace_array_put(tr);
3998
3999 return single_release(inode, file);
4000}
4001
bc0c38d1
SR
4002static int tracing_open(struct inode *inode, struct file *file)
4003{
6484c71c 4004 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
4005 struct trace_iterator *iter;
4006 int ret = 0;
bc0c38d1 4007
ff451961
SRRH
4008 if (trace_array_get(tr) < 0)
4009 return -ENODEV;
4010
4acd4d00 4011 /* If this file was open for write, then erase contents */
6484c71c
ON
4012 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4013 int cpu = tracing_get_cpu(inode);
4014
4015 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4016 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 4017 else
6484c71c 4018 tracing_reset(&tr->trace_buffer, cpu);
4acd4d00 4019 }
bc0c38d1 4020
4acd4d00 4021 if (file->f_mode & FMODE_READ) {
6484c71c 4022 iter = __tracing_open(inode, file, false);
4acd4d00
SR
4023 if (IS_ERR(iter))
4024 ret = PTR_ERR(iter);
983f938a 4025 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4acd4d00
SR
4026 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4027 }
ff451961
SRRH
4028
4029 if (ret < 0)
4030 trace_array_put(tr);
4031
bc0c38d1
SR
4032 return ret;
4033}
4034
607e2ea1
SRRH
4035/*
4036 * Some tracers are not suitable for instance buffers.
4037 * A tracer is always available for the global array (toplevel)
4038 * or if it explicitly states that it is.
4039 */
4040static bool
4041trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4042{
4043 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4044}
4045
4046/* Find the next tracer that this trace array may use */
4047static struct tracer *
4048get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4049{
4050 while (t && !trace_ok_for_array(t, tr))
4051 t = t->next;
4052
4053 return t;
4054}
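
trace_ok_for_array() reduces to the tracer's allow_instances flag for non-global arrays, so a tracer that is safe to run inside an instances/<name>/ sub-buffer simply sets that flag. A hedged sketch of such a tracer follows; the name and the empty callbacks are placeholders, not code from this file.

        #include "trace.h"      /* internal struct tracer definition */

        static int my_tracer_init(struct trace_array *tr)
        {
                return 0;
        }

        static void my_tracer_reset(struct trace_array *tr)
        {
        }

        static struct tracer my_tracer __read_mostly = {
                .name            = "my_tracer",
                .init            = my_tracer_init,
                .reset           = my_tracer_reset,
                .allow_instances = true,        /* usable beyond the top-level buffer */
        };

After register_tracer(&my_tracer), get_tracer_for_array() would hand this tracer out for instance buffers as well as for the global array.
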
4055
e309b41d 4056static void *
bc0c38d1
SR
4057t_next(struct seq_file *m, void *v, loff_t *pos)
4058{
607e2ea1 4059 struct trace_array *tr = m->private;
f129e965 4060 struct tracer *t = v;
bc0c38d1
SR
4061
4062 (*pos)++;
4063
4064 if (t)
607e2ea1 4065 t = get_tracer_for_array(tr, t->next);
bc0c38d1 4066
bc0c38d1
SR
4067 return t;
4068}
4069
4070static void *t_start(struct seq_file *m, loff_t *pos)
4071{
607e2ea1 4072 struct trace_array *tr = m->private;
f129e965 4073 struct tracer *t;
bc0c38d1
SR
4074 loff_t l = 0;
4075
4076 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
4077
4078 t = get_tracer_for_array(tr, trace_types);
4079 for (; t && l < *pos; t = t_next(m, t, &l))
4080 ;
bc0c38d1
SR
4081
4082 return t;
4083}
4084
4085static void t_stop(struct seq_file *m, void *p)
4086{
4087 mutex_unlock(&trace_types_lock);
4088}
4089
4090static int t_show(struct seq_file *m, void *v)
4091{
4092 struct tracer *t = v;
4093
4094 if (!t)
4095 return 0;
4096
fa6f0cc7 4097 seq_puts(m, t->name);
bc0c38d1
SR
4098 if (t->next)
4099 seq_putc(m, ' ');
4100 else
4101 seq_putc(m, '\n');
4102
4103 return 0;
4104}
4105
88e9d34c 4106static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
4107 .start = t_start,
4108 .next = t_next,
4109 .stop = t_stop,
4110 .show = t_show,
bc0c38d1
SR
4111};
4112
4113static int show_traces_open(struct inode *inode, struct file *file)
4114{
607e2ea1
SRRH
4115 struct trace_array *tr = inode->i_private;
4116 struct seq_file *m;
4117 int ret;
4118
60a11774
SR
4119 if (tracing_disabled)
4120 return -ENODEV;
4121
607e2ea1
SRRH
4122 ret = seq_open(file, &show_traces_seq_ops);
4123 if (ret)
4124 return ret;
4125
4126 m = file->private_data;
4127 m->private = tr;
4128
4129 return 0;
bc0c38d1
SR
4130}
4131
4acd4d00
SR
4132static ssize_t
4133tracing_write_stub(struct file *filp, const char __user *ubuf,
4134 size_t count, loff_t *ppos)
4135{
4136 return count;
4137}
4138
098c879e 4139loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 4140{
098c879e
SRRH
4141 int ret;
4142
364829b1 4143 if (file->f_mode & FMODE_READ)
098c879e 4144 ret = seq_lseek(file, offset, whence);
364829b1 4145 else
098c879e
SRRH
4146 file->f_pos = ret = 0;
4147
4148 return ret;
364829b1
SP
4149}
4150
5e2336a0 4151static const struct file_operations tracing_fops = {
4bf39a94
IM
4152 .open = tracing_open,
4153 .read = seq_read,
4acd4d00 4154 .write = tracing_write_stub,
098c879e 4155 .llseek = tracing_lseek,
4bf39a94 4156 .release = tracing_release,
bc0c38d1
SR
4157};
4158
5e2336a0 4159static const struct file_operations show_traces_fops = {
c7078de1
IM
4160 .open = show_traces_open,
4161 .read = seq_read,
4162 .release = seq_release,
b444786f 4163 .llseek = seq_lseek,
c7078de1
IM
4164};
4165
36dfe925
IM
4166/*
 4167 * The tracer itself will not take this lock, but we still want
4168 * to provide a consistent cpumask to user-space:
4169 */
4170static DEFINE_MUTEX(tracing_cpumask_update_lock);
4171
4172/*
4173 * Temporary storage for the character representation of the
4174 * CPU bitmask (and one more byte for the newline):
4175 */
4176static char mask_str[NR_CPUS + 1];
4177
c7078de1
IM
4178static ssize_t
4179tracing_cpumask_read(struct file *filp, char __user *ubuf,
4180 size_t count, loff_t *ppos)
4181{
ccfe9e42 4182 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 4183 int len;
c7078de1
IM
4184
4185 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 4186
1a40243b
TH
4187 len = snprintf(mask_str, count, "%*pb\n",
4188 cpumask_pr_args(tr->tracing_cpumask));
4189 if (len >= count) {
36dfe925
IM
4190 count = -EINVAL;
4191 goto out_err;
4192 }
36dfe925
IM
4193 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
4194
4195out_err:
c7078de1
IM
4196 mutex_unlock(&tracing_cpumask_update_lock);
4197
4198 return count;
4199}
4200
4201static ssize_t
4202tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4203 size_t count, loff_t *ppos)
4204{
ccfe9e42 4205 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 4206 cpumask_var_t tracing_cpumask_new;
2b6080f2 4207 int err, cpu;
9e01c1b7
RR
4208
4209 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4210 return -ENOMEM;
c7078de1 4211
9e01c1b7 4212 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 4213 if (err)
36dfe925
IM
4214 goto err_unlock;
4215
215368e8
LZ
4216 mutex_lock(&tracing_cpumask_update_lock);
4217
a5e25883 4218 local_irq_disable();
0b9b12c1 4219 arch_spin_lock(&tr->max_lock);
ab46428c 4220 for_each_tracing_cpu(cpu) {
36dfe925
IM
4221 /*
4222 * Increase/decrease the disabled counter if we are
4223 * about to flip a bit in the cpumask:
4224 */
ccfe9e42 4225 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4226 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4227 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4228 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 4229 }
ccfe9e42 4230 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4231 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4232 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4233 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
4234 }
4235 }
0b9b12c1 4236 arch_spin_unlock(&tr->max_lock);
a5e25883 4237 local_irq_enable();
36dfe925 4238
ccfe9e42 4239 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
4240
4241 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 4242 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
4243
4244 return count;
36dfe925
IM
4245
4246err_unlock:
215368e8 4247 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
4248
4249 return err;
c7078de1
IM
4250}
4251
5e2336a0 4252static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 4253 .open = tracing_open_generic_tr,
c7078de1
IM
4254 .read = tracing_cpumask_read,
4255 .write = tracing_cpumask_write,
ccfe9e42 4256 .release = tracing_release_generic_tr,
b444786f 4257 .llseek = generic_file_llseek,
bc0c38d1
SR
4258};
4259
fdb372ed 4260static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 4261{
d8e83d26 4262 struct tracer_opt *trace_opts;
2b6080f2 4263 struct trace_array *tr = m->private;
d8e83d26 4264 u32 tracer_flags;
d8e83d26 4265 int i;
adf9f195 4266
d8e83d26 4267 mutex_lock(&trace_types_lock);
2b6080f2
SR
4268 tracer_flags = tr->current_trace->flags->val;
4269 trace_opts = tr->current_trace->flags->opts;
d8e83d26 4270
bc0c38d1 4271 for (i = 0; trace_options[i]; i++) {
983f938a 4272 if (tr->trace_flags & (1 << i))
fdb372ed 4273 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 4274 else
fdb372ed 4275 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
4276 }
4277
adf9f195
FW
4278 for (i = 0; trace_opts[i].name; i++) {
4279 if (tracer_flags & trace_opts[i].bit)
fdb372ed 4280 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 4281 else
fdb372ed 4282 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 4283 }
d8e83d26 4284 mutex_unlock(&trace_types_lock);
adf9f195 4285
fdb372ed 4286 return 0;
bc0c38d1 4287}
bc0c38d1 4288
8c1a49ae 4289static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
4290 struct tracer_flags *tracer_flags,
4291 struct tracer_opt *opts, int neg)
4292{
d39cdd20 4293 struct tracer *trace = tracer_flags->trace;
8d18eaaf 4294 int ret;
bc0c38d1 4295
8c1a49ae 4296 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
4297 if (ret)
4298 return ret;
4299
4300 if (neg)
4301 tracer_flags->val &= ~opts->bit;
4302 else
4303 tracer_flags->val |= opts->bit;
4304 return 0;
bc0c38d1
SR
4305}
4306
adf9f195 4307/* Try to assign a tracer specific option */
8c1a49ae 4308static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 4309{
8c1a49ae 4310 struct tracer *trace = tr->current_trace;
7770841e 4311 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 4312 struct tracer_opt *opts = NULL;
8d18eaaf 4313 int i;
adf9f195 4314
7770841e
Z
4315 for (i = 0; tracer_flags->opts[i].name; i++) {
4316 opts = &tracer_flags->opts[i];
adf9f195 4317
8d18eaaf 4318 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 4319 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 4320 }
adf9f195 4321
8d18eaaf 4322 return -EINVAL;
adf9f195
FW
4323}
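
The trace_opts and tracer_flags tables walked above are supplied by each tracer. Continuing the hypothetical tracer sketched earlier, the pieces that __set_tracer_option() expects would look roughly like this; the option name and bit value are invented for illustration.

        #include "trace.h"

        #define MY_OPT_VERBOSE  0x1

        static struct tracer_opt my_tracer_opts[] = {
                { TRACER_OPT(verbose, MY_OPT_VERBOSE) },        /* "verbose" / "noverbose" */
                { }                                             /* terminator */
        };

        static struct tracer_flags my_tracer_flags = {
                .val  = 0,                      /* nothing enabled by default */
                .opts = my_tracer_opts,
        };

        /* Called from __set_tracer_option() before the bit is flipped;
         * a non-zero return rejects the change. */
        static int my_tracer_set_flag(struct trace_array *tr, u32 old_flags,
                                      u32 bit, int set)
        {
                return 0;
        }

The tracer would then point its .flags at &my_tracer_flags and its .set_flag at my_tracer_set_flag when registering.
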
4324
613f04a0
SRRH
4325/* Some tracers require overwrite to stay enabled */
4326int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4327{
4328 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4329 return -1;
4330
4331 return 0;
4332}
4333
2b6080f2 4334int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
4335{
4336 /* do nothing if flag is already set */
983f938a 4337 if (!!(tr->trace_flags & mask) == !!enabled)
613f04a0
SRRH
4338 return 0;
4339
4340 /* Give the tracer a chance to approve the change */
2b6080f2 4341 if (tr->current_trace->flag_changed)
bf6065b5 4342 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 4343 return -EINVAL;
af4617bd
SR
4344
4345 if (enabled)
983f938a 4346 tr->trace_flags |= mask;
af4617bd 4347 else
983f938a 4348 tr->trace_flags &= ~mask;
e870e9a1
LZ
4349
4350 if (mask == TRACE_ITER_RECORD_CMD)
4351 trace_event_enable_cmd_record(enabled);
750912fa 4352
d914ba37
JF
4353 if (mask == TRACE_ITER_RECORD_TGID) {
4354 if (!tgid_map)
4355 tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map),
4356 GFP_KERNEL);
4357 if (!tgid_map) {
4358 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4359 return -ENOMEM;
4360 }
4361
4362 trace_event_enable_tgid_record(enabled);
4363 }
4364
c37775d5
SR
4365 if (mask == TRACE_ITER_EVENT_FORK)
4366 trace_event_follow_fork(tr, enabled);
4367
1e10486f
NK
4368 if (mask == TRACE_ITER_FUNC_FORK)
4369 ftrace_pid_follow_fork(tr, enabled);
4370
80902822 4371 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 4372 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 4373#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 4374 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
4375#endif
4376 }
81698831 4377
b9f9108c 4378 if (mask == TRACE_ITER_PRINTK) {
81698831 4379 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
4380 trace_printk_control(enabled);
4381 }
613f04a0
SRRH
4382
4383 return 0;
af4617bd
SR
4384}
4385
2b6080f2 4386static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 4387{
8d18eaaf 4388 char *cmp;
bc0c38d1 4389 int neg = 0;
613f04a0 4390 int ret = -ENODEV;
bc0c38d1 4391 int i;
a4d1e688 4392 size_t orig_len = strlen(option);
bc0c38d1 4393
7bcfaf54 4394 cmp = strstrip(option);
bc0c38d1 4395
8d18eaaf 4396 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
4397 neg = 1;
4398 cmp += 2;
4399 }
4400
69d34da2
SRRH
4401 mutex_lock(&trace_types_lock);
4402
bc0c38d1 4403 for (i = 0; trace_options[i]; i++) {
8d18eaaf 4404 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 4405 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
4406 break;
4407 }
4408 }
adf9f195
FW
4409
4410 /* If no option could be set, test the specific tracer options */
69d34da2 4411 if (!trace_options[i])
8c1a49ae 4412 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
4413
4414 mutex_unlock(&trace_types_lock);
bc0c38d1 4415
a4d1e688
JW
4416 /*
4417 * If the first trailing whitespace is replaced with '\0' by strstrip,
4418 * turn it back into a space.
4419 */
4420 if (orig_len > strlen(option))
4421 option[strlen(option)] = ' ';
4422
7bcfaf54
SR
4423 return ret;
4424}
4425
a4d1e688
JW
4426static void __init apply_trace_boot_options(void)
4427{
4428 char *buf = trace_boot_options_buf;
4429 char *option;
4430
4431 while (true) {
4432 option = strsep(&buf, ",");
4433
4434 if (!option)
4435 break;
a4d1e688 4436
43ed3843
SRRH
4437 if (*option)
4438 trace_set_options(&global_trace, option);
a4d1e688
JW
4439
4440 /* Put back the comma to allow this to be called again */
4441 if (buf)
4442 *(buf - 1) = ',';
4443 }
4444}
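
The loop above splits the boot option string on ',' with strsep() and then writes each ',' back, so the same buffer can be re-parsed by a later call. A small stand-alone user-space sketch of that split-and-restore idiom, independent of the kernel code:

        #define _DEFAULT_SOURCE         /* for strsep() on glibc */
        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                char options[] = "trace_printk,nooverwrite,stacktrace";
                char *buf = options;
                char *option;

                /* strsep() NUL-terminates each token, so the ',' is put
                 * back afterwards to leave the buffer intact. */
                while ((option = strsep(&buf, ",")) != NULL) {
                        if (*option)
                                printf("apply option: %s\n", option);
                        if (buf)        /* restore the ',' just consumed */
                                *(buf - 1) = ',';
                }
                printf("buffer after parsing: %s\n", options);
                return 0;
        }
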
4445
7bcfaf54
SR
4446static ssize_t
4447tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4448 size_t cnt, loff_t *ppos)
4449{
2b6080f2
SR
4450 struct seq_file *m = filp->private_data;
4451 struct trace_array *tr = m->private;
7bcfaf54 4452 char buf[64];
613f04a0 4453 int ret;
7bcfaf54
SR
4454
4455 if (cnt >= sizeof(buf))
4456 return -EINVAL;
4457
4afe6495 4458 if (copy_from_user(buf, ubuf, cnt))
7bcfaf54
SR
4459 return -EFAULT;
4460
a8dd2176
SR
4461 buf[cnt] = 0;
4462
2b6080f2 4463 ret = trace_set_options(tr, buf);
613f04a0
SRRH
4464 if (ret < 0)
4465 return ret;
7bcfaf54 4466
cf8517cf 4467 *ppos += cnt;
bc0c38d1
SR
4468
4469 return cnt;
4470}
4471
fdb372ed
LZ
4472static int tracing_trace_options_open(struct inode *inode, struct file *file)
4473{
7b85af63 4474 struct trace_array *tr = inode->i_private;
f77d09a3 4475 int ret;
7b85af63 4476
fdb372ed
LZ
4477 if (tracing_disabled)
4478 return -ENODEV;
2b6080f2 4479
7b85af63
SRRH
4480 if (trace_array_get(tr) < 0)
4481 return -ENODEV;
4482
f77d09a3
AL
4483 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4484 if (ret < 0)
4485 trace_array_put(tr);
4486
4487 return ret;
fdb372ed
LZ
4488}
4489
5e2336a0 4490static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
4491 .open = tracing_trace_options_open,
4492 .read = seq_read,
4493 .llseek = seq_lseek,
7b85af63 4494 .release = tracing_single_release_tr,
ee6bce52 4495 .write = tracing_trace_options_write,
bc0c38d1
SR
4496};
4497
7bd2f24c
IM
4498static const char readme_msg[] =
4499 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
4500 "# echo 0 > tracing_on : quick way to disable tracing\n"
4501 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4502 " Important files:\n"
4503 " trace\t\t\t- The static contents of the buffer\n"
4504 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4505 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4506 " current_tracer\t- function and latency tracers\n"
4507 " available_tracers\t- list of configured tracers for current_tracer\n"
4508 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4509 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4510 " trace_clock\t\t-change the clock used to order events\n"
4511 " local: Per cpu clock but may not be synced across CPUs\n"
4512 " global: Synced across CPUs but slows tracing down.\n"
4513 " counter: Not a clock, but just an increment\n"
4514 " uptime: Jiffy counter from time of boot\n"
4515 " perf: Same clock that perf events use\n"
4516#ifdef CONFIG_X86_64
4517 " x86-tsc: TSC cycle counter\n"
4518#endif
4519 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
fa32e855 4520 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
22f45649
SRRH
4521 " tracing_cpumask\t- Limit which CPUs to trace\n"
4522 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4523 "\t\t\t Remove sub-buffer with rmdir\n"
4524 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
4525 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4526 "\t\t\t option name\n"
939c7a4f 4527 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
4528#ifdef CONFIG_DYNAMIC_FTRACE
4529 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
4530 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4531 "\t\t\t functions\n"
60f1d5e3 4532 "\t accepts: func_full_name or glob-matching-pattern\n"
71485c45
SRRH
4533 "\t modules: Can select a group via module\n"
4534 "\t Format: :mod:<module-name>\n"
4535 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4536 "\t triggers: a command to perform when function is hit\n"
4537 "\t Format: <function>:<trigger>[:count]\n"
4538 "\t trigger: traceon, traceoff\n"
4539 "\t\t enable_event:<system>:<event>\n"
4540 "\t\t disable_event:<system>:<event>\n"
22f45649 4541#ifdef CONFIG_STACKTRACE
71485c45 4542 "\t\t stacktrace\n"
22f45649
SRRH
4543#endif
4544#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4545 "\t\t snapshot\n"
22f45649 4546#endif
17a280ea
SRRH
4547 "\t\t dump\n"
4548 "\t\t cpudump\n"
71485c45
SRRH
4549 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4550 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4551 "\t The first one will disable tracing every time do_fault is hit\n"
4552 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4553 "\t The first time do trap is hit and it disables tracing, the\n"
4554 "\t counter will decrement to 2. If tracing is already disabled,\n"
4555 "\t the counter will not decrement. It only decrements when the\n"
4556 "\t trigger did work\n"
4557 "\t To remove trigger without count:\n"
4558 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4559 "\t To remove trigger with a count:\n"
4560 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 4561 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
4562 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4563 "\t modules: Can select a group via module command :mod:\n"
4564 "\t Does not accept triggers\n"
22f45649
SRRH
4565#endif /* CONFIG_DYNAMIC_FTRACE */
4566#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
4567 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4568 "\t\t (function)\n"
22f45649
SRRH
4569#endif
4570#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4571 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 4572 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
4573 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4574#endif
4575#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
4576 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4577 "\t\t\t snapshot buffer. Read the contents for more\n"
4578 "\t\t\t information\n"
22f45649 4579#endif
991821c8 4580#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
4581 " stack_trace\t\t- Shows the max stack trace when active\n"
4582 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
4583 "\t\t\t Write into this file to reset the max size (trigger a\n"
4584 "\t\t\t new trace)\n"
22f45649 4585#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
4586 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4587 "\t\t\t traces\n"
22f45649 4588#endif
991821c8 4589#endif /* CONFIG_STACK_TRACER */
6b0b7551 4590#ifdef CONFIG_KPROBE_EVENTS
86425625
MH
4591 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4592 "\t\t\t Write into this file to define/undefine new trace events.\n"
4593#endif
6b0b7551 4594#ifdef CONFIG_UPROBE_EVENTS
86425625
MH
4595 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4596 "\t\t\t Write into this file to define/undefine new trace events.\n"
4597#endif
6b0b7551 4598#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
86425625 4599 "\t accepts: event-definitions (one definition per line)\n"
c3ca46ef
MH
4600 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4601 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
86425625 4602 "\t -:[<group>/]<event>\n"
6b0b7551 4603#ifdef CONFIG_KPROBE_EVENTS
86425625 4604 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
35b6f55a 4605 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
86425625 4606#endif
6b0b7551 4607#ifdef CONFIG_UPROBE_EVENTS
86425625
MH
4608 "\t place: <path>:<offset>\n"
4609#endif
4610 "\t args: <name>=fetcharg[:type]\n"
4611 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4612 "\t $stack<index>, $stack, $retval, $comm\n"
4613 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4614 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4615#endif
26f25564
TZ
4616 " events/\t\t- Directory containing all trace event subsystems:\n"
4617 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4618 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
4619 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4620 "\t\t\t events\n"
26f25564 4621 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
4622 " events/<system>/<event>/\t- Directory containing control files for\n"
4623 "\t\t\t <event>:\n"
26f25564
TZ
4624 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4625 " filter\t\t- If set, only events passing filter are traced\n"
4626 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
4627 "\t Format: <trigger>[:count][if <filter>]\n"
4628 "\t trigger: traceon, traceoff\n"
4629 "\t enable_event:<system>:<event>\n"
4630 "\t disable_event:<system>:<event>\n"
d0bad49b
TZ
4631#ifdef CONFIG_HIST_TRIGGERS
4632 "\t enable_hist:<system>:<event>\n"
4633 "\t disable_hist:<system>:<event>\n"
4634#endif
26f25564 4635#ifdef CONFIG_STACKTRACE
71485c45 4636 "\t\t stacktrace\n"
26f25564
TZ
4637#endif
4638#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4639 "\t\t snapshot\n"
7ef224d1
TZ
4640#endif
4641#ifdef CONFIG_HIST_TRIGGERS
4642 "\t\t hist (see below)\n"
26f25564 4643#endif
71485c45
SRRH
4644 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4645 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4646 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4647 "\t events/block/block_unplug/trigger\n"
4648 "\t The first disables tracing every time block_unplug is hit.\n"
4649 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4650 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4651 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4652 "\t Like function triggers, the counter is only decremented if it\n"
4653 "\t enabled or disabled tracing.\n"
4654 "\t To remove a trigger without a count:\n"
4655 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4656 "\t To remove a trigger with a count:\n"
4657 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4658 "\t Filters can be ignored when removing a trigger.\n"
7ef224d1
TZ
4659#ifdef CONFIG_HIST_TRIGGERS
4660 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
76a3b0c8 4661 "\t Format: hist:keys=<field1[,field2,...]>\n"
f2606835 4662 "\t [:values=<field1[,field2,...]>]\n"
e62347d2 4663 "\t [:sort=<field1[,field2,...]>]\n"
7ef224d1 4664 "\t [:size=#entries]\n"
e86ae9ba 4665 "\t [:pause][:continue][:clear]\n"
5463bfda 4666 "\t [:name=histname1]\n"
7ef224d1
TZ
4667 "\t [if <filter>]\n\n"
4668 "\t When a matching event is hit, an entry is added to a hash\n"
f2606835
TZ
4669 "\t table using the key(s) and value(s) named, and the value of a\n"
4670 "\t sum called 'hitcount' is incremented. Keys and values\n"
4671 "\t correspond to fields in the event's format description. Keys\n"
69a0200c
TZ
4672 "\t can be any field, or the special string 'stacktrace'.\n"
4673 "\t Compound keys consisting of up to two fields can be specified\n"
4674 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4675 "\t fields. Sort keys consisting of up to two fields can be\n"
4676 "\t specified using the 'sort' keyword. The sort direction can\n"
4677 "\t be modified by appending '.descending' or '.ascending' to a\n"
4678 "\t sort field. The 'size' parameter can be used to specify more\n"
5463bfda
TZ
4679 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4680 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4681 "\t its histogram data will be shared with other triggers of the\n"
4682 "\t same name, and trigger hits will update this common data.\n\n"
7ef224d1 4683 "\t Reading the 'hist' file for the event will dump the hash\n"
52a7f16d
TZ
4684 "\t table in its entirety to stdout. If there are multiple hist\n"
4685 "\t triggers attached to an event, there will be a table for each\n"
5463bfda
TZ
4686 "\t trigger in the output. The table displayed for a named\n"
4687 "\t trigger will be the same as any other instance having the\n"
4688 "\t same name. The default format used to display a given field\n"
4689 "\t can be modified by appending any of the following modifiers\n"
4690 "\t to the field name, as applicable:\n\n"
c6afad49
TZ
4691 "\t .hex display a number as a hex value\n"
4692 "\t .sym display an address as a symbol\n"
6b4827ad 4693 "\t .sym-offset display an address as a symbol and offset\n"
31696198
TZ
4694 "\t .execname display a common_pid as a program name\n"
4695 "\t .syscall display a syscall id as a syscall name\n\n"
4b94f5b7 4696 "\t .log2 display log2 value rather than raw number\n\n"
83e99914
TZ
4697 "\t The 'pause' parameter can be used to pause an existing hist\n"
4698 "\t trigger or to start a hist trigger but not log any events\n"
4699 "\t until told to do so. 'continue' can be used to start or\n"
4700 "\t restart a paused hist trigger.\n\n"
e86ae9ba
TZ
4701 "\t The 'clear' parameter will clear the contents of a running\n"
4702 "\t hist trigger and leave its current paused/active state\n"
4703 "\t unchanged.\n\n"
d0bad49b
TZ
4704 "\t The enable_hist and disable_hist triggers can be used to\n"
4705 "\t have one event conditionally start and stop another event's\n"
4706 "\t already-attached hist trigger. The syntax is analagous to\n"
4707 "\t the enable_event and disable_event triggers.\n"
7ef224d1 4708#endif
7bd2f24c
IM
4709;
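
Everything described in the HOWTO above is driven by plain open/read/write on tracefs files. The following is a hedged user-space example; the mount point is assumed to be /sys/kernel/debug/tracing (on some systems it is /sys/kernel/tracing instead). It switches to the nop tracer, turns tracing on and drains trace_pipe, the consuming read described above.

        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>

        #define TRACEFS "/sys/kernel/debug/tracing"     /* assumed mount point */

        static void write_str(const char *path, const char *val)
        {
                int fd = open(path, O_WRONLY);

                if (fd < 0)
                        return;
                if (write(fd, val, strlen(val)) < 0)
                        perror("write");
                close(fd);
        }

        int main(void)
        {
                char buf[4096];
                ssize_t n;
                int fd;

                write_str(TRACEFS "/current_tracer", "nop");
                write_str(TRACEFS "/tracing_on", "1");

                /* trace_pipe blocks until data arrives and consumes what it reads. */
                fd = open(TRACEFS "/trace_pipe", O_RDONLY);
                if (fd < 0) {
                        perror("open trace_pipe");
                        return 1;
                }
                while ((n = read(fd, buf, sizeof(buf))) > 0)
                        fwrite(buf, 1, n, stdout);
                close(fd);
                return 0;
        }
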
4710
4711static ssize_t
4712tracing_readme_read(struct file *filp, char __user *ubuf,
4713 size_t cnt, loff_t *ppos)
4714{
4715 return simple_read_from_buffer(ubuf, cnt, ppos,
4716 readme_msg, strlen(readme_msg));
4717}
4718
5e2336a0 4719static const struct file_operations tracing_readme_fops = {
c7078de1
IM
4720 .open = tracing_open_generic,
4721 .read = tracing_readme_read,
b444786f 4722 .llseek = generic_file_llseek,
7bd2f24c
IM
4723};
4724
99c621d7
MS
4725static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4726{
4727 int *ptr = v;
4728
4729 if (*pos || m->count)
4730 ptr++;
4731
4732 (*pos)++;
4733
4734 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4735 if (trace_find_tgid(*ptr))
4736 return ptr;
4737 }
4738
4739 return NULL;
4740}
4741
4742static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4743{
4744 void *v;
4745 loff_t l = 0;
4746
4747 if (!tgid_map)
4748 return NULL;
4749
4750 v = &tgid_map[0];
4751 while (l <= *pos) {
4752 v = saved_tgids_next(m, v, &l);
4753 if (!v)
4754 return NULL;
4755 }
4756
4757 return v;
4758}
4759
4760static void saved_tgids_stop(struct seq_file *m, void *v)
4761{
4762}
4763
4764static int saved_tgids_show(struct seq_file *m, void *v)
4765{
4766 int pid = (int *)v - tgid_map;
4767
4768 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4769 return 0;
4770}
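
saved_tgids_show() recovers the pid purely by pointer arithmetic: the seq_file cookie is a pointer into tgid_map[], which is indexed by pid, so subtracting the array base yields the pid again. A tiny stand-alone illustration of that idiom:

        #include <stdio.h>

        int main(void)
        {
                int map[8] = { 0 };
                int *slot = &map[5];

                /* The difference of two pointers into the same array is the
                 * element index, which is how saved_tgids_show() gets the pid. */
                printf("index = %td\n", slot - map);    /* prints 5 */
                return 0;
        }
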
4771
4772static const struct seq_operations tracing_saved_tgids_seq_ops = {
4773 .start = saved_tgids_start,
4774 .stop = saved_tgids_stop,
4775 .next = saved_tgids_next,
4776 .show = saved_tgids_show,
4777};
4778
4779static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4780{
4781 if (tracing_disabled)
4782 return -ENODEV;
4783
4784 return seq_open(filp, &tracing_saved_tgids_seq_ops);
4785}
4786
4787
4788static const struct file_operations tracing_saved_tgids_fops = {
4789 .open = tracing_saved_tgids_open,
4790 .read = seq_read,
4791 .llseek = seq_lseek,
4792 .release = seq_release,
4793};
4794
42584c81
YY
4795static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4796{
4797 unsigned int *ptr = v;
69abe6a5 4798
42584c81
YY
4799 if (*pos || m->count)
4800 ptr++;
69abe6a5 4801
42584c81 4802 (*pos)++;
69abe6a5 4803
939c7a4f
YY
4804 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4805 ptr++) {
42584c81
YY
4806 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4807 continue;
69abe6a5 4808
42584c81
YY
4809 return ptr;
4810 }
69abe6a5 4811
42584c81
YY
4812 return NULL;
4813}
4814
4815static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4816{
4817 void *v;
4818 loff_t l = 0;
69abe6a5 4819
4c27e756
SRRH
4820 preempt_disable();
4821 arch_spin_lock(&trace_cmdline_lock);
4822
939c7a4f 4823 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
4824 while (l <= *pos) {
4825 v = saved_cmdlines_next(m, v, &l);
4826 if (!v)
4827 return NULL;
69abe6a5
AP
4828 }
4829
42584c81
YY
4830 return v;
4831}
4832
4833static void saved_cmdlines_stop(struct seq_file *m, void *v)
4834{
4c27e756
SRRH
4835 arch_spin_unlock(&trace_cmdline_lock);
4836 preempt_enable();
42584c81 4837}
69abe6a5 4838
42584c81
YY
4839static int saved_cmdlines_show(struct seq_file *m, void *v)
4840{
4841 char buf[TASK_COMM_LEN];
4842 unsigned int *pid = v;
69abe6a5 4843
4c27e756 4844 __trace_find_cmdline(*pid, buf);
42584c81
YY
4845 seq_printf(m, "%d %s\n", *pid, buf);
4846 return 0;
4847}
4848
4849static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4850 .start = saved_cmdlines_start,
4851 .next = saved_cmdlines_next,
4852 .stop = saved_cmdlines_stop,
4853 .show = saved_cmdlines_show,
4854};
4855
4856static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4857{
4858 if (tracing_disabled)
4859 return -ENODEV;
4860
4861 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
4862}
4863
4864static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
4865 .open = tracing_saved_cmdlines_open,
4866 .read = seq_read,
4867 .llseek = seq_lseek,
4868 .release = seq_release,
69abe6a5
AP
4869};
4870
939c7a4f
YY
4871static ssize_t
4872tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4873 size_t cnt, loff_t *ppos)
4874{
4875 char buf[64];
4876 int r;
4877
4878 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 4879 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
4880 arch_spin_unlock(&trace_cmdline_lock);
4881
4882 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4883}
4884
4885static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4886{
4887 kfree(s->saved_cmdlines);
4888 kfree(s->map_cmdline_to_pid);
4889 kfree(s);
4890}
4891
4892static int tracing_resize_saved_cmdlines(unsigned int val)
4893{
4894 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4895
a6af8fbf 4896 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
4897 if (!s)
4898 return -ENOMEM;
4899
4900 if (allocate_cmdlines_buffer(val, s) < 0) {
4901 kfree(s);
4902 return -ENOMEM;
4903 }
4904
4905 arch_spin_lock(&trace_cmdline_lock);
4906 savedcmd_temp = savedcmd;
4907 savedcmd = s;
4908 arch_spin_unlock(&trace_cmdline_lock);
4909 free_saved_cmdlines_buffer(savedcmd_temp);
4910
4911 return 0;
4912}
4913
4914static ssize_t
4915tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4916 size_t cnt, loff_t *ppos)
4917{
4918 unsigned long val;
4919 int ret;
4920
4921 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4922 if (ret)
4923 return ret;
4924
 4925	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
4926 if (!val || val > PID_MAX_DEFAULT)
4927 return -EINVAL;
4928
4929 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4930 if (ret < 0)
4931 return ret;
4932
4933 *ppos += cnt;
4934
4935 return cnt;
4936}
4937
4938static const struct file_operations tracing_saved_cmdlines_size_fops = {
4939 .open = tracing_open_generic,
4940 .read = tracing_saved_cmdlines_size_read,
4941 .write = tracing_saved_cmdlines_size_write,
4942};
4943
681bec03 4944#ifdef CONFIG_TRACE_EVAL_MAP_FILE
23bf8cb8 4945static union trace_eval_map_item *
f57a4143 4946update_eval_map(union trace_eval_map_item *ptr)
9828413d 4947{
00f4b652 4948 if (!ptr->map.eval_string) {
9828413d
SRRH
4949 if (ptr->tail.next) {
4950 ptr = ptr->tail.next;
4951 /* Set ptr to the next real item (skip head) */
4952 ptr++;
4953 } else
4954 return NULL;
4955 }
4956 return ptr;
4957}
4958
f57a4143 4959static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
9828413d 4960{
23bf8cb8 4961 union trace_eval_map_item *ptr = v;
9828413d
SRRH
4962
4963 /*
4964 * Paranoid! If ptr points to end, we don't want to increment past it.
4965 * This really should never happen.
4966 */
f57a4143 4967 ptr = update_eval_map(ptr);
9828413d
SRRH
4968 if (WARN_ON_ONCE(!ptr))
4969 return NULL;
4970
4971 ptr++;
4972
4973 (*pos)++;
4974
f57a4143 4975 ptr = update_eval_map(ptr);
9828413d
SRRH
4976
4977 return ptr;
4978}
4979
f57a4143 4980static void *eval_map_start(struct seq_file *m, loff_t *pos)
9828413d 4981{
23bf8cb8 4982 union trace_eval_map_item *v;
9828413d
SRRH
4983 loff_t l = 0;
4984
1793ed93 4985 mutex_lock(&trace_eval_mutex);
9828413d 4986
23bf8cb8 4987 v = trace_eval_maps;
9828413d
SRRH
4988 if (v)
4989 v++;
4990
4991 while (v && l < *pos) {
f57a4143 4992 v = eval_map_next(m, v, &l);
9828413d
SRRH
4993 }
4994
4995 return v;
4996}
4997
f57a4143 4998static void eval_map_stop(struct seq_file *m, void *v)
9828413d 4999{
1793ed93 5000 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
5001}
5002
f57a4143 5003static int eval_map_show(struct seq_file *m, void *v)
9828413d 5004{
23bf8cb8 5005 union trace_eval_map_item *ptr = v;
9828413d
SRRH
5006
5007 seq_printf(m, "%s %ld (%s)\n",
00f4b652 5008 ptr->map.eval_string, ptr->map.eval_value,
9828413d
SRRH
5009 ptr->map.system);
5010
5011 return 0;
5012}
5013
f57a4143
JL
5014static const struct seq_operations tracing_eval_map_seq_ops = {
5015 .start = eval_map_start,
5016 .next = eval_map_next,
5017 .stop = eval_map_stop,
5018 .show = eval_map_show,
9828413d
SRRH
5019};
5020
f57a4143 5021static int tracing_eval_map_open(struct inode *inode, struct file *filp)
9828413d
SRRH
5022{
5023 if (tracing_disabled)
5024 return -ENODEV;
5025
f57a4143 5026 return seq_open(filp, &tracing_eval_map_seq_ops);
9828413d
SRRH
5027}
5028
f57a4143
JL
5029static const struct file_operations tracing_eval_map_fops = {
5030 .open = tracing_eval_map_open,
9828413d
SRRH
5031 .read = seq_read,
5032 .llseek = seq_lseek,
5033 .release = seq_release,
5034};
5035
23bf8cb8 5036static inline union trace_eval_map_item *
5f60b351 5037trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
9828413d
SRRH
5038{
5039 /* Return tail of array given the head */
5040 return ptr + ptr->head.length + 1;
5041}
5042
5043static void
f57a4143 5044trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
9828413d
SRRH
5045 int len)
5046{
00f4b652
JL
5047 struct trace_eval_map **stop;
5048 struct trace_eval_map **map;
23bf8cb8
JL
5049 union trace_eval_map_item *map_array;
5050 union trace_eval_map_item *ptr;
9828413d
SRRH
5051
5052 stop = start + len;
5053
5054 /*
23bf8cb8 5055 * The trace_eval_maps contains the map plus a head and tail item,
9828413d
SRRH
5056 * where the head holds the module and length of array, and the
5057 * tail holds a pointer to the next list.
5058 */
5059 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
5060 if (!map_array) {
f57a4143 5061 pr_warn("Unable to allocate trace eval mapping\n");
9828413d
SRRH
5062 return;
5063 }
5064
1793ed93 5065 mutex_lock(&trace_eval_mutex);
9828413d 5066
23bf8cb8
JL
5067 if (!trace_eval_maps)
5068 trace_eval_maps = map_array;
9828413d 5069 else {
23bf8cb8 5070 ptr = trace_eval_maps;
9828413d 5071 for (;;) {
5f60b351 5072 ptr = trace_eval_jmp_to_tail(ptr);
9828413d
SRRH
5073 if (!ptr->tail.next)
5074 break;
5075 ptr = ptr->tail.next;
5076
5077 }
5078 ptr->tail.next = map_array;
5079 }
5080 map_array->head.mod = mod;
5081 map_array->head.length = len;
5082 map_array++;
5083
5084 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5085 map_array->map = **map;
5086 map_array++;
5087 }
5088 memset(map_array, 0, sizeof(*map_array));
5089
1793ed93 5090 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
5091}
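
The head/tail bookkeeping above is easiest to see as a picture of the allocated array: slot 0 carries the module and length, the copied maps follow, and the zeroed final slot doubles as the tail whose .next links to the next module's array. A sketch, with len == 3 chosen purely for illustration:

        /*
         * Layout built by trace_insert_eval_map_file() for len == 3:
         *
         *   map_array[0]   head:  .mod = mod, .length = 3
         *   map_array[1]   map:   copy of *start[0]
         *   map_array[2]   map:   copy of *start[1]
         *   map_array[3]   map:   copy of *start[2]
         *   map_array[4]   tail:  zeroed; .tail.next chains to the next array
         *
         * trace_eval_jmp_to_tail() jumps from the head to map_array[len + 1],
         * i.e. straight to this tail slot.
         */
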
5092
f57a4143 5093static void trace_create_eval_file(struct dentry *d_tracer)
9828413d 5094{
681bec03 5095 trace_create_file("eval_map", 0444, d_tracer,
f57a4143 5096 NULL, &tracing_eval_map_fops);
9828413d
SRRH
5097}
5098
681bec03 5099#else /* CONFIG_TRACE_EVAL_MAP_FILE */
f57a4143
JL
5100static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5101static inline void trace_insert_eval_map_file(struct module *mod,
00f4b652 5102 struct trace_eval_map **start, int len) { }
681bec03 5103#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 5104
f57a4143 5105static void trace_insert_eval_map(struct module *mod,
00f4b652 5106 struct trace_eval_map **start, int len)
0c564a53 5107{
00f4b652 5108 struct trace_eval_map **map;
0c564a53
SRRH
5109
5110 if (len <= 0)
5111 return;
5112
5113 map = start;
5114
f57a4143 5115 trace_event_eval_update(map, len);
9828413d 5116
f57a4143 5117 trace_insert_eval_map_file(mod, start, len);
0c564a53
SRRH
5118}
5119
bc0c38d1
SR
5120static ssize_t
5121tracing_set_trace_read(struct file *filp, char __user *ubuf,
5122 size_t cnt, loff_t *ppos)
5123{
2b6080f2 5124 struct trace_array *tr = filp->private_data;
ee6c2c1b 5125 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
5126 int r;
5127
5128 mutex_lock(&trace_types_lock);
2b6080f2 5129 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
5130 mutex_unlock(&trace_types_lock);
5131
4bf39a94 5132 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
5133}
5134
b6f11df2
ACM
5135int tracer_init(struct tracer *t, struct trace_array *tr)
5136{
12883efb 5137 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
5138 return t->init(tr);
5139}
5140
12883efb 5141static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
5142{
5143 int cpu;
737223fb 5144
438ced17 5145 for_each_tracing_cpu(cpu)
12883efb 5146 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
5147}
5148
12883efb 5149#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 5150/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
5151static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5152 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
5153{
5154 int cpu, ret = 0;
5155
5156 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5157 for_each_tracing_cpu(cpu) {
12883efb
SRRH
5158 ret = ring_buffer_resize(trace_buf->buffer,
5159 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
5160 if (ret < 0)
5161 break;
12883efb
SRRH
5162 per_cpu_ptr(trace_buf->data, cpu)->entries =
5163 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
5164 }
5165 } else {
12883efb
SRRH
5166 ret = ring_buffer_resize(trace_buf->buffer,
5167 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 5168 if (ret == 0)
12883efb
SRRH
5169 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5170 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
5171 }
5172
5173 return ret;
5174}
12883efb 5175#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 5176
2b6080f2
SR
5177static int __tracing_resize_ring_buffer(struct trace_array *tr,
5178 unsigned long size, int cpu)
73c5162a
SR
5179{
5180 int ret;
5181
5182 /*
 5183	 * If the kernel or a user changes the size of the ring buffer,
a123c52b
SR
5184 * we use the size that was given, and we can forget about
5185 * expanding it later.
73c5162a 5186 */
55034cd6 5187 ring_buffer_expanded = true;
73c5162a 5188
b382ede6 5189 /* May be called before buffers are initialized */
12883efb 5190 if (!tr->trace_buffer.buffer)
b382ede6
SR
5191 return 0;
5192
12883efb 5193 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
5194 if (ret < 0)
5195 return ret;
5196
12883efb 5197#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
5198 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5199 !tr->current_trace->use_max_tr)
ef710e10
KM
5200 goto out;
5201
12883efb 5202 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 5203 if (ret < 0) {
12883efb
SRRH
5204 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5205 &tr->trace_buffer, cpu);
73c5162a 5206 if (r < 0) {
a123c52b
SR
5207 /*
5208 * AARGH! We are left with different
5209 * size max buffer!!!!
5210 * The max buffer is our "snapshot" buffer.
5211 * When a tracer needs a snapshot (one of the
5212 * latency tracers), it swaps the max buffer
 5213			 * with the saved snapshot. We managed to
5214 * update the size of the main buffer, but failed to
5215 * update the size of the max buffer. But when we tried
5216 * to reset the main buffer to the original size, we
5217 * failed there too. This is very unlikely to
5218 * happen, but if it does, warn and kill all
5219 * tracing.
5220 */
73c5162a
SR
5221 WARN_ON(1);
5222 tracing_disabled = 1;
5223 }
5224 return ret;
5225 }
5226
438ced17 5227 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5228 set_buffer_entries(&tr->max_buffer, size);
438ced17 5229 else
12883efb 5230 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 5231
ef710e10 5232 out:
12883efb
SRRH
5233#endif /* CONFIG_TRACER_MAX_TRACE */
5234
438ced17 5235 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5236 set_buffer_entries(&tr->trace_buffer, size);
438ced17 5237 else
12883efb 5238 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
5239
5240 return ret;
5241}
5242
2b6080f2
SR
5243static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5244 unsigned long size, int cpu_id)
4f271a2a 5245{
83f40318 5246 int ret = size;
4f271a2a
VN
5247
5248 mutex_lock(&trace_types_lock);
5249
438ced17
VN
5250 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5251 /* make sure, this cpu is enabled in the mask */
5252 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5253 ret = -EINVAL;
5254 goto out;
5255 }
5256 }
4f271a2a 5257
2b6080f2 5258 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
5259 if (ret < 0)
5260 ret = -ENOMEM;
5261
438ced17 5262out:
4f271a2a
VN
5263 mutex_unlock(&trace_types_lock);
5264
5265 return ret;
5266}
5267
ef710e10 5268
1852fcce
SR
5269/**
5270 * tracing_update_buffers - used by tracing facility to expand ring buffers
5271 *
5272 * To save memory when tracing is never used on a system that has it
5273 * configured in, the ring buffers are set to a minimum size. Once a
5274 * user starts to use the tracing facility, they need to grow to their
5275 * default size.
5276 *
5277 * This function is to be called when a tracer is about to be used.
5278 */
5279int tracing_update_buffers(void)
5280{
5281 int ret = 0;
5282
1027fcb2 5283 mutex_lock(&trace_types_lock);
1852fcce 5284 if (!ring_buffer_expanded)
2b6080f2 5285 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 5286 RING_BUFFER_ALL_CPUS);
1027fcb2 5287 mutex_unlock(&trace_types_lock);
1852fcce
SR
5288
5289 return ret;
5290}
5291
577b785f
SR
5292struct trace_option_dentry;
5293
37aea98b 5294static void
2b6080f2 5295create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 5296
6b450d25
SRRH
5297/*
5298 * Used to clear out the tracer before deletion of an instance.
5299 * Must have trace_types_lock held.
5300 */
5301static void tracing_set_nop(struct trace_array *tr)
5302{
5303 if (tr->current_trace == &nop_trace)
5304 return;
5305
50512ab5 5306 tr->current_trace->enabled--;
6b450d25
SRRH
5307
5308 if (tr->current_trace->reset)
5309 tr->current_trace->reset(tr);
5310
5311 tr->current_trace = &nop_trace;
5312}
5313
41d9c0be 5314static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 5315{
09d23a1d
SRRH
5316 /* Only enable if the directory has been created already. */
5317 if (!tr->dir)
5318 return;
5319
37aea98b 5320 create_trace_option_files(tr, t);
09d23a1d
SRRH
5321}
5322
5323static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5324{
bc0c38d1 5325 struct tracer *t;
12883efb 5326#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5327 bool had_max_tr;
12883efb 5328#endif
d9e54076 5329 int ret = 0;
bc0c38d1 5330
1027fcb2
SR
5331 mutex_lock(&trace_types_lock);
5332
73c5162a 5333 if (!ring_buffer_expanded) {
2b6080f2 5334 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 5335 RING_BUFFER_ALL_CPUS);
73c5162a 5336 if (ret < 0)
59f586db 5337 goto out;
73c5162a
SR
5338 ret = 0;
5339 }
5340
bc0c38d1
SR
5341 for (t = trace_types; t; t = t->next) {
5342 if (strcmp(t->name, buf) == 0)
5343 break;
5344 }
c2931e05
FW
5345 if (!t) {
5346 ret = -EINVAL;
5347 goto out;
5348 }
2b6080f2 5349 if (t == tr->current_trace)
bc0c38d1
SR
5350 goto out;
5351
607e2ea1
SRRH
5352 /* Some tracers are only allowed for the top level buffer */
5353 if (!trace_ok_for_array(t, tr)) {
5354 ret = -EINVAL;
5355 goto out;
5356 }
5357
cf6ab6d9
SRRH
5358 /* If trace pipe files are being read, we can't change the tracer */
5359 if (tr->current_trace->ref) {
5360 ret = -EBUSY;
5361 goto out;
5362 }
5363
9f029e83 5364 trace_branch_disable();
613f04a0 5365
50512ab5 5366 tr->current_trace->enabled--;
613f04a0 5367
2b6080f2
SR
5368 if (tr->current_trace->reset)
5369 tr->current_trace->reset(tr);
34600f0e 5370
12883efb 5371 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 5372 tr->current_trace = &nop_trace;
34600f0e 5373
45ad21ca
SRRH
5374#ifdef CONFIG_TRACER_MAX_TRACE
5375 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
5376
5377 if (had_max_tr && !t->use_max_tr) {
5378 /*
5379 * We need to make sure that the update_max_tr sees that
5380 * current_trace changed to nop_trace to keep it from
5381 * swapping the buffers after we resize it.
5382 * The update_max_tr is called with interrupts disabled,
5383 * so a synchronize_sched() is sufficient.
5384 */
5385 synchronize_sched();
3209cff4 5386 free_snapshot(tr);
ef710e10 5387 }
12883efb 5388#endif
12883efb
SRRH
5389
5390#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5391 if (t->use_max_tr && !had_max_tr) {
3209cff4 5392 ret = alloc_snapshot(tr);
d60da506
HT
5393 if (ret < 0)
5394 goto out;
ef710e10 5395 }
12883efb 5396#endif
577b785f 5397
1c80025a 5398 if (t->init) {
b6f11df2 5399 ret = tracer_init(t, tr);
1c80025a
FW
5400 if (ret)
5401 goto out;
5402 }
bc0c38d1 5403
2b6080f2 5404 tr->current_trace = t;
50512ab5 5405 tr->current_trace->enabled++;
9f029e83 5406 trace_branch_enable(tr);
bc0c38d1
SR
5407 out:
5408 mutex_unlock(&trace_types_lock);
5409
d9e54076
PZ
5410 return ret;
5411}
5412
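/*
 * Write handler for the "current_tracer" control file.  Writing a
 * tracer name, e.g. "echo function > current_tracer" (with tracefs
 * typically mounted at /sys/kernel/tracing), selects that tracer via
 * tracing_set_tracer() above; trailing whitespace added by echo is
 * stripped before the lookup.
 */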
5413static ssize_t
5414tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5415 size_t cnt, loff_t *ppos)
5416{
607e2ea1 5417 struct trace_array *tr = filp->private_data;
ee6c2c1b 5418 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
5419 int i;
5420 size_t ret;
e6e7a65a
FW
5421 int err;
5422
5423 ret = cnt;
d9e54076 5424
ee6c2c1b
LZ
5425 if (cnt > MAX_TRACER_SIZE)
5426 cnt = MAX_TRACER_SIZE;
d9e54076 5427
4afe6495 5428 if (copy_from_user(buf, ubuf, cnt))
d9e54076
PZ
5429 return -EFAULT;
5430
5431 buf[cnt] = 0;
5432
5433 /* strip ending whitespace. */
5434 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5435 buf[i] = 0;
5436
607e2ea1 5437 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
5438 if (err)
5439 return err;
d9e54076 5440
cf8517cf 5441 *ppos += ret;
bc0c38d1 5442
c2931e05 5443 return ret;
bc0c38d1
SR
5444}
5445
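/*
 * Helpers backing the "tracing_thresh" and "tracing_max_latency"
 * control files.  The values are shown to user space in microseconds
 * but stored in nanoseconds, hence the "* 1000" on write and
 * nsecs_to_usecs() on read; -1 is printed while a value is unset.
 */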
5446static ssize_t
6508fa76
SF
5447tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5448 size_t cnt, loff_t *ppos)
bc0c38d1 5449{
bc0c38d1
SR
5450 char buf[64];
5451 int r;
5452
cffae437 5453 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 5454 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
5455 if (r > sizeof(buf))
5456 r = sizeof(buf);
4bf39a94 5457 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
5458}
5459
5460static ssize_t
6508fa76
SF
5461tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5462 size_t cnt, loff_t *ppos)
bc0c38d1 5463{
5e39841c 5464 unsigned long val;
c6caeeb1 5465 int ret;
bc0c38d1 5466
22fe9b54
PH
5467 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5468 if (ret)
c6caeeb1 5469 return ret;
bc0c38d1
SR
5470
5471 *ptr = val * 1000;
5472
5473 return cnt;
5474}
5475
6508fa76
SF
5476static ssize_t
5477tracing_thresh_read(struct file *filp, char __user *ubuf,
5478 size_t cnt, loff_t *ppos)
5479{
5480 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5481}
5482
5483static ssize_t
5484tracing_thresh_write(struct file *filp, const char __user *ubuf,
5485 size_t cnt, loff_t *ppos)
5486{
5487 struct trace_array *tr = filp->private_data;
5488 int ret;
5489
5490 mutex_lock(&trace_types_lock);
5491 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5492 if (ret < 0)
5493 goto out;
5494
5495 if (tr->current_trace->update_thresh) {
5496 ret = tr->current_trace->update_thresh(tr);
5497 if (ret < 0)
5498 goto out;
5499 }
5500
5501 ret = cnt;
5502out:
5503 mutex_unlock(&trace_types_lock);
5504
5505 return ret;
5506}
5507
f971cc9a 5508#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
e428abbb 5509
6508fa76
SF
5510static ssize_t
5511tracing_max_lat_read(struct file *filp, char __user *ubuf,
5512 size_t cnt, loff_t *ppos)
5513{
5514 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5515}
5516
5517static ssize_t
5518tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5519 size_t cnt, loff_t *ppos)
5520{
5521 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5522}
5523
e428abbb
CG
5524#endif
5525
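/*
 * "trace_pipe" is a consuming reader: entries handed to user space are
 * removed from the ring buffer (see trace_consume() in
 * tracing_read_pipe() below), and reads block until data is available
 * unless the file was opened with O_NONBLOCK.  Holding the pipe open
 * also bumps current_trace->ref, which keeps the current tracer from
 * being changed while it is being read.
 */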
b3806b43
SR
5526static int tracing_open_pipe(struct inode *inode, struct file *filp)
5527{
15544209 5528 struct trace_array *tr = inode->i_private;
b3806b43 5529 struct trace_iterator *iter;
b04cc6b1 5530 int ret = 0;
b3806b43
SR
5531
5532 if (tracing_disabled)
5533 return -ENODEV;
5534
7b85af63
SRRH
5535 if (trace_array_get(tr) < 0)
5536 return -ENODEV;
5537
b04cc6b1
FW
5538 mutex_lock(&trace_types_lock);
5539
b3806b43
SR
5540 /* create a buffer to store the information to pass to userspace */
5541 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
5542 if (!iter) {
5543 ret = -ENOMEM;
f77d09a3 5544 __trace_array_put(tr);
b04cc6b1
FW
5545 goto out;
5546 }
b3806b43 5547
3a161d99 5548 trace_seq_init(&iter->seq);
d716ff71 5549 iter->trace = tr->current_trace;
d7350c3f 5550
4462344e 5551 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 5552 ret = -ENOMEM;
d7350c3f 5553 goto fail;
4462344e
RR
5554 }
5555
a309720c 5556 /* trace pipe does not show start of buffer */
4462344e 5557 cpumask_setall(iter->started);
a309720c 5558
983f938a 5559 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
112f38a7
SR
5560 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5561
8be0709f 5562 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 5563 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
5564 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5565
15544209
ON
5566 iter->tr = tr;
5567 iter->trace_buffer = &tr->trace_buffer;
5568 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 5569 mutex_init(&iter->mutex);
b3806b43
SR
5570 filp->private_data = iter;
5571
107bad8b
SR
5572 if (iter->trace->pipe_open)
5573 iter->trace->pipe_open(iter);
107bad8b 5574
b444786f 5575 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
5576
5577 tr->current_trace->ref++;
b04cc6b1
FW
5578out:
5579 mutex_unlock(&trace_types_lock);
5580 return ret;
d7350c3f
FW
5581
5582fail:
5583 kfree(iter->trace);
5584 kfree(iter);
7b85af63 5585 __trace_array_put(tr);
d7350c3f
FW
5586 mutex_unlock(&trace_types_lock);
5587 return ret;
b3806b43
SR
5588}
5589
5590static int tracing_release_pipe(struct inode *inode, struct file *file)
5591{
5592 struct trace_iterator *iter = file->private_data;
15544209 5593 struct trace_array *tr = inode->i_private;
b3806b43 5594
b04cc6b1
FW
5595 mutex_lock(&trace_types_lock);
5596
cf6ab6d9
SRRH
5597 tr->current_trace->ref--;
5598
29bf4a5e 5599 if (iter->trace->pipe_close)
c521efd1
SR
5600 iter->trace->pipe_close(iter);
5601
b04cc6b1
FW
5602 mutex_unlock(&trace_types_lock);
5603
4462344e 5604 free_cpumask_var(iter->started);
d7350c3f 5605 mutex_destroy(&iter->mutex);
b3806b43 5606 kfree(iter);
b3806b43 5607
7b85af63
SRRH
5608 trace_array_put(tr);
5609
b3806b43
SR
5610 return 0;
5611}
5612
2a2cc8f7 5613static unsigned int
cc60cdc9 5614trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 5615{
983f938a
SRRH
5616 struct trace_array *tr = iter->tr;
5617
15693458
SRRH
5618 /* Iterators are static; they should be filled or empty */
5619 if (trace_buffer_iter(iter, iter->cpu_file))
5620 return POLLIN | POLLRDNORM;
2a2cc8f7 5621
983f938a 5622 if (tr->trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
5623 /*
5624 * Always select as readable when in blocking mode
5625 */
5626 return POLLIN | POLLRDNORM;
15693458 5627 else
12883efb 5628 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 5629 filp, poll_table);
2a2cc8f7 5630}
2a2cc8f7 5631
cc60cdc9
SR
5632static unsigned int
5633tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5634{
5635 struct trace_iterator *iter = filp->private_data;
5636
5637 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
5638}
5639
d716ff71 5640/* Must be called with iter->mutex held. */
ff98781b 5641static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
5642{
5643 struct trace_iterator *iter = filp->private_data;
8b8b3683 5644 int ret;
b3806b43 5645
b3806b43 5646 while (trace_empty(iter)) {
2dc8f095 5647
107bad8b 5648 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 5649 return -EAGAIN;
107bad8b 5650 }
2dc8f095 5651
b3806b43 5652 /*
250bfd3d 5653 * We block while the buffer is empty, unless tracing has been
b3806b43
SR
5654 * disabled and we have already read something. This allows a
5655 * user to cat this file, and then enable tracing. But after
5656 * we have read something, we give an EOF when tracing is
5657 * again disabled.
5658 *
5659 * iter->pos will be 0 if we haven't read anything.
5660 */
10246fa3 5661 if (!tracing_is_on() && iter->pos)
b3806b43 5662 break;
f4874261
SRRH
5663
5664 mutex_unlock(&iter->mutex);
5665
e30f53aa 5666 ret = wait_on_pipe(iter, false);
f4874261
SRRH
5667
5668 mutex_lock(&iter->mutex);
5669
8b8b3683
SRRH
5670 if (ret)
5671 return ret;
b3806b43
SR
5672 }
5673
ff98781b
EGM
5674 return 1;
5675}
5676
5677/*
5678 * Consumer reader.
5679 */
5680static ssize_t
5681tracing_read_pipe(struct file *filp, char __user *ubuf,
5682 size_t cnt, loff_t *ppos)
5683{
5684 struct trace_iterator *iter = filp->private_data;
5685 ssize_t sret;
5686
d7350c3f
FW
5687 /*
5688 * Avoid more than one consumer on a single file descriptor.
5689 * This is just a matter of trace coherency; the ring buffer itself
5690 * is protected.
5691 */
5692 mutex_lock(&iter->mutex);
1245800c
SRRH
5693
5694 /* return any leftover data */
5695 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5696 if (sret != -EBUSY)
5697 goto out;
5698
5699 trace_seq_init(&iter->seq);
5700
ff98781b
EGM
5701 if (iter->trace->read) {
5702 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5703 if (sret)
5704 goto out;
5705 }
5706
5707waitagain:
5708 sret = tracing_wait_pipe(filp);
5709 if (sret <= 0)
5710 goto out;
5711
b3806b43 5712 /* stop when tracing is finished */
ff98781b
EGM
5713 if (trace_empty(iter)) {
5714 sret = 0;
107bad8b 5715 goto out;
ff98781b 5716 }
b3806b43
SR
5717
5718 if (cnt >= PAGE_SIZE)
5719 cnt = PAGE_SIZE - 1;
5720
53d0aa77 5721 /* reset all but tr, trace, and overruns */
53d0aa77
SR
5722 memset(&iter->seq, 0,
5723 sizeof(struct trace_iterator) -
5724 offsetof(struct trace_iterator, seq));
ed5467da 5725 cpumask_clear(iter->started);
4823ed7e 5726 iter->pos = -1;
b3806b43 5727
4f535968 5728 trace_event_read_lock();
7e53bd42 5729 trace_access_lock(iter->cpu_file);
955b61e5 5730 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 5731 enum print_line_t ret;
5ac48378 5732 int save_len = iter->seq.seq.len;
088b1e42 5733
f9896bf3 5734 ret = print_trace_line(iter);
2c4f035f 5735 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 5736 /* don't print partial lines */
5ac48378 5737 iter->seq.seq.len = save_len;
b3806b43 5738 break;
088b1e42 5739 }
b91facc3
FW
5740 if (ret != TRACE_TYPE_NO_CONSUME)
5741 trace_consume(iter);
b3806b43 5742
5ac48378 5743 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 5744 break;
ee5e51f5
JO
5745
5746 /*
5747 * Setting the full flag means we reached the trace_seq buffer
5748 * size and we should leave by partial output condition above.
5749 * One of the trace_seq_* functions is not used properly.
5750 */
5751 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5752 iter->ent->type);
b3806b43 5753 }
7e53bd42 5754 trace_access_unlock(iter->cpu_file);
4f535968 5755 trace_event_read_unlock();
b3806b43 5756
b3806b43 5757 /* Now copy what we have to the user */
6c6c2796 5758 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 5759 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 5760 trace_seq_init(&iter->seq);
9ff4b974
PP
5761
5762 /*
25985edc 5763 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
5764 * entries, go back to wait for more entries.
5765 */
6c6c2796 5766 if (sret == -EBUSY)
9ff4b974 5767 goto waitagain;
b3806b43 5768
107bad8b 5769out:
d7350c3f 5770 mutex_unlock(&iter->mutex);
107bad8b 5771
6c6c2796 5772 return sret;
b3806b43
SR
5773}
5774
3c56819b
EGM
5775static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5776 unsigned int idx)
5777{
5778 __free_page(spd->pages[idx]);
5779}
5780
28dfef8f 5781static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 5782 .can_merge = 0,
34cd4998 5783 .confirm = generic_pipe_buf_confirm,
92fdd98c 5784 .release = generic_pipe_buf_release,
34cd4998
SR
5785 .steal = generic_pipe_buf_steal,
5786 .get = generic_pipe_buf_get,
3c56819b
EGM
5787};
5788
34cd4998 5789static size_t
fa7c7f6e 5790tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
5791{
5792 size_t count;
74f06bb7 5793 int save_len;
34cd4998
SR
5794 int ret;
5795
5796 /* Seq buffer is page-sized, exactly what we need. */
5797 for (;;) {
74f06bb7 5798 save_len = iter->seq.seq.len;
34cd4998 5799 ret = print_trace_line(iter);
74f06bb7
SRRH
5800
5801 if (trace_seq_has_overflowed(&iter->seq)) {
5802 iter->seq.seq.len = save_len;
34cd4998
SR
5803 break;
5804 }
74f06bb7
SRRH
5805
5806 /*
5807 * This should not be hit, because it should only
5808 * be set if the iter->seq overflowed. But check it
5809 * anyway to be safe.
5810 */
34cd4998 5811 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
5812 iter->seq.seq.len = save_len;
5813 break;
5814 }
5815
5ac48378 5816 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
5817 if (rem < count) {
5818 rem = 0;
5819 iter->seq.seq.len = save_len;
34cd4998
SR
5820 break;
5821 }
5822
74e7ff8c
LJ
5823 if (ret != TRACE_TYPE_NO_CONSUME)
5824 trace_consume(iter);
34cd4998 5825 rem -= count;
955b61e5 5826 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
5827 rem = 0;
5828 iter->ent = NULL;
5829 break;
5830 }
5831 }
5832
5833 return rem;
5834}
5835
3c56819b
EGM
5836static ssize_t tracing_splice_read_pipe(struct file *filp,
5837 loff_t *ppos,
5838 struct pipe_inode_info *pipe,
5839 size_t len,
5840 unsigned int flags)
5841{
35f3d14d
JA
5842 struct page *pages_def[PIPE_DEF_BUFFERS];
5843 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
5844 struct trace_iterator *iter = filp->private_data;
5845 struct splice_pipe_desc spd = {
35f3d14d
JA
5846 .pages = pages_def,
5847 .partial = partial_def,
34cd4998 5848 .nr_pages = 0, /* This gets updated below. */
047fe360 5849 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
5850 .ops = &tracing_pipe_buf_ops,
5851 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
5852 };
5853 ssize_t ret;
34cd4998 5854 size_t rem;
3c56819b
EGM
5855 unsigned int i;
5856
35f3d14d
JA
5857 if (splice_grow_spd(pipe, &spd))
5858 return -ENOMEM;
5859
d7350c3f 5860 mutex_lock(&iter->mutex);
3c56819b
EGM
5861
5862 if (iter->trace->splice_read) {
5863 ret = iter->trace->splice_read(iter, filp,
5864 ppos, pipe, len, flags);
5865 if (ret)
34cd4998 5866 goto out_err;
3c56819b
EGM
5867 }
5868
5869 ret = tracing_wait_pipe(filp);
5870 if (ret <= 0)
34cd4998 5871 goto out_err;
3c56819b 5872
955b61e5 5873 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 5874 ret = -EFAULT;
34cd4998 5875 goto out_err;
3c56819b
EGM
5876 }
5877
4f535968 5878 trace_event_read_lock();
7e53bd42 5879 trace_access_lock(iter->cpu_file);
4f535968 5880
3c56819b 5881 /* Fill as many pages as possible. */
a786c06d 5882 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
5883 spd.pages[i] = alloc_page(GFP_KERNEL);
5884 if (!spd.pages[i])
34cd4998 5885 break;
3c56819b 5886
fa7c7f6e 5887 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
5888
5889 /* Copy the data into the page, so we can start over. */
5890 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 5891 page_address(spd.pages[i]),
5ac48378 5892 trace_seq_used(&iter->seq));
3c56819b 5893 if (ret < 0) {
35f3d14d 5894 __free_page(spd.pages[i]);
3c56819b
EGM
5895 break;
5896 }
35f3d14d 5897 spd.partial[i].offset = 0;
5ac48378 5898 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 5899
f9520750 5900 trace_seq_init(&iter->seq);
3c56819b
EGM
5901 }
5902
7e53bd42 5903 trace_access_unlock(iter->cpu_file);
4f535968 5904 trace_event_read_unlock();
d7350c3f 5905 mutex_unlock(&iter->mutex);
3c56819b
EGM
5906
5907 spd.nr_pages = i;
5908
a29054d9
SRRH
5909 if (i)
5910 ret = splice_to_pipe(pipe, &spd);
5911 else
5912 ret = 0;
35f3d14d 5913out:
047fe360 5914 splice_shrink_spd(&spd);
35f3d14d 5915 return ret;
3c56819b 5916
34cd4998 5917out_err:
d7350c3f 5918 mutex_unlock(&iter->mutex);
35f3d14d 5919 goto out;
3c56819b
EGM
5920}
5921
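/*
 * Read/write handlers for "buffer_size_kb" and its per_cpu variants.
 * The value is in kilobytes per CPU, for example (paths relative to
 * the tracefs mount point, typically /sys/kernel/tracing):
 *
 *   echo 4096 > buffer_size_kb               # 4 MB for every CPU buffer
 *   echo 4096 > per_cpu/cpu1/buffer_size_kb  # resize only CPU 1
 *
 * A read shows the current size, with "(expanded: N)" appended while
 * the buffer still has its minimal boot-time size, or "X" when the
 * per-CPU sizes differ.
 */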
a98a3c3f
SR
5922static ssize_t
5923tracing_entries_read(struct file *filp, char __user *ubuf,
5924 size_t cnt, loff_t *ppos)
5925{
0bc392ee
ON
5926 struct inode *inode = file_inode(filp);
5927 struct trace_array *tr = inode->i_private;
5928 int cpu = tracing_get_cpu(inode);
438ced17
VN
5929 char buf[64];
5930 int r = 0;
5931 ssize_t ret;
a98a3c3f 5932
db526ca3 5933 mutex_lock(&trace_types_lock);
438ced17 5934
0bc392ee 5935 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
5936 int cpu, buf_size_same;
5937 unsigned long size;
5938
5939 size = 0;
5940 buf_size_same = 1;
5941 /* check if all cpu sizes are same */
5942 for_each_tracing_cpu(cpu) {
5943 /* fill in the size from first enabled cpu */
5944 if (size == 0)
12883efb
SRRH
5945 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5946 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
5947 buf_size_same = 0;
5948 break;
5949 }
5950 }
5951
5952 if (buf_size_same) {
5953 if (!ring_buffer_expanded)
5954 r = sprintf(buf, "%lu (expanded: %lu)\n",
5955 size >> 10,
5956 trace_buf_size >> 10);
5957 else
5958 r = sprintf(buf, "%lu\n", size >> 10);
5959 } else
5960 r = sprintf(buf, "X\n");
5961 } else
0bc392ee 5962 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 5963
db526ca3
SR
5964 mutex_unlock(&trace_types_lock);
5965
438ced17
VN
5966 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5967 return ret;
a98a3c3f
SR
5968}
5969
5970static ssize_t
5971tracing_entries_write(struct file *filp, const char __user *ubuf,
5972 size_t cnt, loff_t *ppos)
5973{
0bc392ee
ON
5974 struct inode *inode = file_inode(filp);
5975 struct trace_array *tr = inode->i_private;
a98a3c3f 5976 unsigned long val;
4f271a2a 5977 int ret;
a98a3c3f 5978
22fe9b54
PH
5979 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5980 if (ret)
c6caeeb1 5981 return ret;
a98a3c3f
SR
5982
5983 /* must have at least 1 entry */
5984 if (!val)
5985 return -EINVAL;
5986
1696b2b0
SR
5987 /* value is in KB */
5988 val <<= 10;
0bc392ee 5989 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
5990 if (ret < 0)
5991 return ret;
a98a3c3f 5992
cf8517cf 5993 *ppos += cnt;
a98a3c3f 5994
4f271a2a
VN
5995 return cnt;
5996}
bf5e6519 5997
f81ab074
VN
5998static ssize_t
5999tracing_total_entries_read(struct file *filp, char __user *ubuf,
6000 size_t cnt, loff_t *ppos)
6001{
6002 struct trace_array *tr = filp->private_data;
6003 char buf[64];
6004 int r, cpu;
6005 unsigned long size = 0, expanded_size = 0;
6006
6007 mutex_lock(&trace_types_lock);
6008 for_each_tracing_cpu(cpu) {
12883efb 6009 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
6010 if (!ring_buffer_expanded)
6011 expanded_size += trace_buf_size >> 10;
6012 }
6013 if (ring_buffer_expanded)
6014 r = sprintf(buf, "%lu\n", size);
6015 else
6016 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6017 mutex_unlock(&trace_types_lock);
6018
6019 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6020}
6021
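/*
 * Handlers for the "free_buffer" file.  Data written to it is ignored;
 * the work happens on release: if the TRACE_ITER_STOP_ON_FREE option is
 * set tracing is turned off, and the ring buffer is resized to zero,
 * handing its pages back to the system.
 */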
4f271a2a
VN
6022static ssize_t
6023tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6024 size_t cnt, loff_t *ppos)
6025{
6026 /*
6027 * There is no need to read what the user has written; this function
6028 * just makes sure that there is no error when "echo" is used.
6029 */
6030
6031 *ppos += cnt;
a98a3c3f
SR
6032
6033 return cnt;
6034}
6035
4f271a2a
VN
6036static int
6037tracing_free_buffer_release(struct inode *inode, struct file *filp)
6038{
2b6080f2
SR
6039 struct trace_array *tr = inode->i_private;
6040
cf30cf67 6041 /* disable tracing? */
983f938a 6042 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 6043 tracer_tracing_off(tr);
4f271a2a 6044 /* resize the ring buffer to 0 */
2b6080f2 6045 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 6046
7b85af63
SRRH
6047 trace_array_put(tr);
6048
4f271a2a
VN
6049 return 0;
6050}
6051
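/*
 * Writes to "trace_marker" are injected into the ring buffer as
 * TRACE_PRINT events, e.g. "echo hello > trace_marker".  A trailing
 * newline is added when missing, and if copying the string from user
 * space faults, "<faulted>" is recorded instead.
 */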
5bf9a1ee
PP
6052static ssize_t
6053tracing_mark_write(struct file *filp, const char __user *ubuf,
6054 size_t cnt, loff_t *fpos)
6055{
2d71619c 6056 struct trace_array *tr = filp->private_data;
d696b58c
SR
6057 struct ring_buffer_event *event;
6058 struct ring_buffer *buffer;
6059 struct print_entry *entry;
6060 unsigned long irq_flags;
656c7f0d 6061 const char faulted[] = "<faulted>";
d696b58c 6062 ssize_t written;
d696b58c
SR
6063 int size;
6064 int len;
fa32e855 6065
656c7f0d
SRRH
6066/* Used in tracing_mark_raw_write() as well */
6067#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
5bf9a1ee 6068
c76f0694 6069 if (tracing_disabled)
5bf9a1ee
PP
6070 return -EINVAL;
6071
983f938a 6072 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5224c3a3
MSB
6073 return -EINVAL;
6074
5bf9a1ee
PP
6075 if (cnt > TRACE_BUF_SIZE)
6076 cnt = TRACE_BUF_SIZE;
6077
d696b58c 6078 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 6079
d696b58c 6080 local_save_flags(irq_flags);
656c7f0d 6081 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
d696b58c 6082
656c7f0d
SRRH
6083 /* If less than "<faulted>", then make sure we can still add that */
6084 if (cnt < FAULTED_SIZE)
6085 size += FAULTED_SIZE - cnt;
d696b58c 6086
2d71619c 6087 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
6088 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6089 irq_flags, preempt_count());
656c7f0d 6090 if (unlikely(!event))
d696b58c 6091 /* Ring buffer disabled, return as if not open for write */
656c7f0d 6092 return -EBADF;
d696b58c
SR
6093
6094 entry = ring_buffer_event_data(event);
6095 entry->ip = _THIS_IP_;
6096
656c7f0d
SRRH
6097 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6098 if (len) {
6099 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6100 cnt = FAULTED_SIZE;
6101 written = -EFAULT;
c13d2f7c 6102 } else
656c7f0d
SRRH
6103 written = cnt;
6104 len = cnt;
5bf9a1ee 6105
d696b58c
SR
6106 if (entry->buf[cnt - 1] != '\n') {
6107 entry->buf[cnt] = '\n';
6108 entry->buf[cnt + 1] = '\0';
6109 } else
6110 entry->buf[cnt] = '\0';
6111
7ffbd48d 6112 __buffer_unlock_commit(buffer, event);
5bf9a1ee 6113
656c7f0d
SRRH
6114 if (written > 0)
6115 *fpos += written;
5bf9a1ee 6116
fa32e855
SR
6117 return written;
6118}
6119
6120/* Limit it for now to 3K (including tag) */
6121#define RAW_DATA_MAX_SIZE (1024*3)
6122
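/*
 * Binary counterpart of tracing_mark_write() above, backing the
 * "trace_marker_raw" file.  The payload must start with a 4-byte tag
 * id followed by up to ~3K of raw data and is recorded as a
 * TRACE_RAW_DATA event.
 */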
6123static ssize_t
6124tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6125 size_t cnt, loff_t *fpos)
6126{
6127 struct trace_array *tr = filp->private_data;
6128 struct ring_buffer_event *event;
6129 struct ring_buffer *buffer;
6130 struct raw_data_entry *entry;
656c7f0d 6131 const char faulted[] = "<faulted>";
fa32e855 6132 unsigned long irq_flags;
fa32e855 6133 ssize_t written;
fa32e855
SR
6134 int size;
6135 int len;
6136
656c7f0d
SRRH
6137#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6138
fa32e855
SR
6139 if (tracing_disabled)
6140 return -EINVAL;
6141
6142 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6143 return -EINVAL;
6144
6145 /* The marker must at least have a tag id */
6146 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6147 return -EINVAL;
6148
6149 if (cnt > TRACE_BUF_SIZE)
6150 cnt = TRACE_BUF_SIZE;
6151
6152 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6153
fa32e855
SR
6154 local_save_flags(irq_flags);
6155 size = sizeof(*entry) + cnt;
656c7f0d
SRRH
6156 if (cnt < FAULT_SIZE_ID)
6157 size += FAULT_SIZE_ID - cnt;
6158
fa32e855 6159 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
6160 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6161 irq_flags, preempt_count());
656c7f0d 6162 if (!event)
fa32e855 6163 /* Ring buffer disabled, return as if not open for write */
656c7f0d 6164 return -EBADF;
fa32e855
SR
6165
6166 entry = ring_buffer_event_data(event);
6167
656c7f0d
SRRH
6168 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6169 if (len) {
6170 entry->id = -1;
6171 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6172 written = -EFAULT;
fa32e855 6173 } else
656c7f0d 6174 written = cnt;
fa32e855
SR
6175
6176 __buffer_unlock_commit(buffer, event);
6177
656c7f0d
SRRH
6178 if (written > 0)
6179 *fpos += written;
1aa54bca
MS
6180
6181 return written;
5bf9a1ee
PP
6182}
6183
13f16d20 6184static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 6185{
2b6080f2 6186 struct trace_array *tr = m->private;
5079f326
Z
6187 int i;
6188
6189 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 6190 seq_printf(m,
5079f326 6191 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
6192 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6193 i == tr->clock_id ? "]" : "");
13f16d20 6194 seq_putc(m, '\n');
5079f326 6195
13f16d20 6196 return 0;
5079f326
Z
6197}
6198
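/*
 * Switch the clock used to time stamp events.  Reading "trace_clock"
 * lists the available clocks with the current one in brackets; writing
 * a name, e.g. "echo global > trace_clock", selects it.  Both the main
 * and the max (snapshot) buffer are switched, and both are reset since
 * timestamps taken with different clocks cannot be compared.
 */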
e1e232ca 6199static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 6200{
5079f326
Z
6201 int i;
6202
5079f326
Z
6203 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6204 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6205 break;
6206 }
6207 if (i == ARRAY_SIZE(trace_clocks))
6208 return -EINVAL;
6209
5079f326
Z
6210 mutex_lock(&trace_types_lock);
6211
2b6080f2
SR
6212 tr->clock_id = i;
6213
12883efb 6214 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 6215
60303ed3
DS
6216 /*
6217 * New clock may not be consistent with the previous clock.
6218 * Reset the buffer so that it doesn't have incomparable timestamps.
6219 */
9457158b 6220 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
6221
6222#ifdef CONFIG_TRACER_MAX_TRACE
6223 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
6224 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 6225 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 6226#endif
60303ed3 6227
5079f326
Z
6228 mutex_unlock(&trace_types_lock);
6229
e1e232ca
SR
6230 return 0;
6231}
6232
6233static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6234 size_t cnt, loff_t *fpos)
6235{
6236 struct seq_file *m = filp->private_data;
6237 struct trace_array *tr = m->private;
6238 char buf[64];
6239 const char *clockstr;
6240 int ret;
6241
6242 if (cnt >= sizeof(buf))
6243 return -EINVAL;
6244
4afe6495 6245 if (copy_from_user(buf, ubuf, cnt))
e1e232ca
SR
6246 return -EFAULT;
6247
6248 buf[cnt] = 0;
6249
6250 clockstr = strstrip(buf);
6251
6252 ret = tracing_set_clock(tr, clockstr);
6253 if (ret)
6254 return ret;
6255
5079f326
Z
6256 *fpos += cnt;
6257
6258 return cnt;
6259}
6260
13f16d20
LZ
6261static int tracing_clock_open(struct inode *inode, struct file *file)
6262{
7b85af63
SRRH
6263 struct trace_array *tr = inode->i_private;
6264 int ret;
6265
13f16d20
LZ
6266 if (tracing_disabled)
6267 return -ENODEV;
2b6080f2 6268
7b85af63
SRRH
6269 if (trace_array_get(tr))
6270 return -ENODEV;
6271
6272 ret = single_open(file, tracing_clock_show, inode->i_private);
6273 if (ret < 0)
6274 trace_array_put(tr);
6275
6276 return ret;
13f16d20
LZ
6277}
6278
6de58e62
SRRH
6279struct ftrace_buffer_info {
6280 struct trace_iterator iter;
6281 void *spare;
73a757e6 6282 unsigned int spare_cpu;
6de58e62
SRRH
6283 unsigned int read;
6284};
6285
debdd57f
HT
6286#ifdef CONFIG_TRACER_SNAPSHOT
6287static int tracing_snapshot_open(struct inode *inode, struct file *file)
6288{
6484c71c 6289 struct trace_array *tr = inode->i_private;
debdd57f 6290 struct trace_iterator *iter;
2b6080f2 6291 struct seq_file *m;
debdd57f
HT
6292 int ret = 0;
6293
ff451961
SRRH
6294 if (trace_array_get(tr) < 0)
6295 return -ENODEV;
6296
debdd57f 6297 if (file->f_mode & FMODE_READ) {
6484c71c 6298 iter = __tracing_open(inode, file, true);
debdd57f
HT
6299 if (IS_ERR(iter))
6300 ret = PTR_ERR(iter);
2b6080f2
SR
6301 } else {
6302 /* Writes still need the seq_file to hold the private data */
f77d09a3 6303 ret = -ENOMEM;
2b6080f2
SR
6304 m = kzalloc(sizeof(*m), GFP_KERNEL);
6305 if (!m)
f77d09a3 6306 goto out;
2b6080f2
SR
6307 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6308 if (!iter) {
6309 kfree(m);
f77d09a3 6310 goto out;
2b6080f2 6311 }
f77d09a3
AL
6312 ret = 0;
6313
ff451961 6314 iter->tr = tr;
6484c71c
ON
6315 iter->trace_buffer = &tr->max_buffer;
6316 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
6317 m->private = iter;
6318 file->private_data = m;
debdd57f 6319 }
f77d09a3 6320out:
ff451961
SRRH
6321 if (ret < 0)
6322 trace_array_put(tr);
6323
debdd57f
HT
6324 return ret;
6325}
6326
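/*
 * Writes to the "snapshot" file control the snapshot (max) buffer:
 *   0  - free the snapshot buffer (not allowed from the per_cpu files)
 *   1  - allocate the snapshot buffer if needed and swap it with the
 *        live buffer (per-cpu swap needs CONFIG_RING_BUFFER_ALLOW_SWAP)
 *   2+ - clear the contents of the existing snapshot
 * Tracers that use the max buffer themselves (the latency tracers)
 * refuse these writes with -EBUSY.
 */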
6327static ssize_t
6328tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6329 loff_t *ppos)
6330{
2b6080f2
SR
6331 struct seq_file *m = filp->private_data;
6332 struct trace_iterator *iter = m->private;
6333 struct trace_array *tr = iter->tr;
debdd57f
HT
6334 unsigned long val;
6335 int ret;
6336
6337 ret = tracing_update_buffers();
6338 if (ret < 0)
6339 return ret;
6340
6341 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6342 if (ret)
6343 return ret;
6344
6345 mutex_lock(&trace_types_lock);
6346
2b6080f2 6347 if (tr->current_trace->use_max_tr) {
debdd57f
HT
6348 ret = -EBUSY;
6349 goto out;
6350 }
6351
6352 switch (val) {
6353 case 0:
f1affcaa
SRRH
6354 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6355 ret = -EINVAL;
6356 break;
debdd57f 6357 }
3209cff4
SRRH
6358 if (tr->allocated_snapshot)
6359 free_snapshot(tr);
debdd57f
HT
6360 break;
6361 case 1:
f1affcaa
SRRH
6362/* Only allow per-cpu swap if the ring buffer supports it */
6363#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6364 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6365 ret = -EINVAL;
6366 break;
6367 }
6368#endif
45ad21ca 6369 if (!tr->allocated_snapshot) {
3209cff4 6370 ret = alloc_snapshot(tr);
debdd57f
HT
6371 if (ret < 0)
6372 break;
debdd57f 6373 }
debdd57f
HT
6374 local_irq_disable();
6375 /* Now, we're going to swap */
f1affcaa 6376 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 6377 update_max_tr(tr, current, smp_processor_id());
f1affcaa 6378 else
ce9bae55 6379 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
6380 local_irq_enable();
6381 break;
6382 default:
45ad21ca 6383 if (tr->allocated_snapshot) {
f1affcaa
SRRH
6384 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6385 tracing_reset_online_cpus(&tr->max_buffer);
6386 else
6387 tracing_reset(&tr->max_buffer, iter->cpu_file);
6388 }
debdd57f
HT
6389 break;
6390 }
6391
6392 if (ret >= 0) {
6393 *ppos += cnt;
6394 ret = cnt;
6395 }
6396out:
6397 mutex_unlock(&trace_types_lock);
6398 return ret;
6399}
2b6080f2
SR
6400
6401static int tracing_snapshot_release(struct inode *inode, struct file *file)
6402{
6403 struct seq_file *m = file->private_data;
ff451961
SRRH
6404 int ret;
6405
6406 ret = tracing_release(inode, file);
2b6080f2
SR
6407
6408 if (file->f_mode & FMODE_READ)
ff451961 6409 return ret;
2b6080f2
SR
6410
6411 /* If write only, the seq_file is just a stub */
6412 if (m)
6413 kfree(m->private);
6414 kfree(m);
6415
6416 return 0;
6417}
6418
6de58e62
SRRH
6419static int tracing_buffers_open(struct inode *inode, struct file *filp);
6420static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6421 size_t count, loff_t *ppos);
6422static int tracing_buffers_release(struct inode *inode, struct file *file);
6423static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6424 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6425
6426static int snapshot_raw_open(struct inode *inode, struct file *filp)
6427{
6428 struct ftrace_buffer_info *info;
6429 int ret;
6430
6431 ret = tracing_buffers_open(inode, filp);
6432 if (ret < 0)
6433 return ret;
6434
6435 info = filp->private_data;
6436
6437 if (info->iter.trace->use_max_tr) {
6438 tracing_buffers_release(inode, filp);
6439 return -EBUSY;
6440 }
6441
6442 info->iter.snapshot = true;
6443 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6444
6445 return ret;
6446}
6447
debdd57f
HT
6448#endif /* CONFIG_TRACER_SNAPSHOT */
6449
6450
6508fa76
SF
6451static const struct file_operations tracing_thresh_fops = {
6452 .open = tracing_open_generic,
6453 .read = tracing_thresh_read,
6454 .write = tracing_thresh_write,
6455 .llseek = generic_file_llseek,
6456};
6457
f971cc9a 6458#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5e2336a0 6459static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
6460 .open = tracing_open_generic,
6461 .read = tracing_max_lat_read,
6462 .write = tracing_max_lat_write,
b444786f 6463 .llseek = generic_file_llseek,
bc0c38d1 6464};
e428abbb 6465#endif
bc0c38d1 6466
5e2336a0 6467static const struct file_operations set_tracer_fops = {
4bf39a94
IM
6468 .open = tracing_open_generic,
6469 .read = tracing_set_trace_read,
6470 .write = tracing_set_trace_write,
b444786f 6471 .llseek = generic_file_llseek,
bc0c38d1
SR
6472};
6473
5e2336a0 6474static const struct file_operations tracing_pipe_fops = {
4bf39a94 6475 .open = tracing_open_pipe,
2a2cc8f7 6476 .poll = tracing_poll_pipe,
4bf39a94 6477 .read = tracing_read_pipe,
3c56819b 6478 .splice_read = tracing_splice_read_pipe,
4bf39a94 6479 .release = tracing_release_pipe,
b444786f 6480 .llseek = no_llseek,
b3806b43
SR
6481};
6482
5e2336a0 6483static const struct file_operations tracing_entries_fops = {
0bc392ee 6484 .open = tracing_open_generic_tr,
a98a3c3f
SR
6485 .read = tracing_entries_read,
6486 .write = tracing_entries_write,
b444786f 6487 .llseek = generic_file_llseek,
0bc392ee 6488 .release = tracing_release_generic_tr,
a98a3c3f
SR
6489};
6490
f81ab074 6491static const struct file_operations tracing_total_entries_fops = {
7b85af63 6492 .open = tracing_open_generic_tr,
f81ab074
VN
6493 .read = tracing_total_entries_read,
6494 .llseek = generic_file_llseek,
7b85af63 6495 .release = tracing_release_generic_tr,
f81ab074
VN
6496};
6497
4f271a2a 6498static const struct file_operations tracing_free_buffer_fops = {
7b85af63 6499 .open = tracing_open_generic_tr,
4f271a2a
VN
6500 .write = tracing_free_buffer_write,
6501 .release = tracing_free_buffer_release,
6502};
6503
5e2336a0 6504static const struct file_operations tracing_mark_fops = {
7b85af63 6505 .open = tracing_open_generic_tr,
5bf9a1ee 6506 .write = tracing_mark_write,
b444786f 6507 .llseek = generic_file_llseek,
7b85af63 6508 .release = tracing_release_generic_tr,
5bf9a1ee
PP
6509};
6510
fa32e855
SR
6511static const struct file_operations tracing_mark_raw_fops = {
6512 .open = tracing_open_generic_tr,
6513 .write = tracing_mark_raw_write,
6514 .llseek = generic_file_llseek,
6515 .release = tracing_release_generic_tr,
6516};
6517
5079f326 6518static const struct file_operations trace_clock_fops = {
13f16d20
LZ
6519 .open = tracing_clock_open,
6520 .read = seq_read,
6521 .llseek = seq_lseek,
7b85af63 6522 .release = tracing_single_release_tr,
5079f326
Z
6523 .write = tracing_clock_write,
6524};
6525
debdd57f
HT
6526#ifdef CONFIG_TRACER_SNAPSHOT
6527static const struct file_operations snapshot_fops = {
6528 .open = tracing_snapshot_open,
6529 .read = seq_read,
6530 .write = tracing_snapshot_write,
098c879e 6531 .llseek = tracing_lseek,
2b6080f2 6532 .release = tracing_snapshot_release,
debdd57f 6533};
debdd57f 6534
6de58e62
SRRH
6535static const struct file_operations snapshot_raw_fops = {
6536 .open = snapshot_raw_open,
6537 .read = tracing_buffers_read,
6538 .release = tracing_buffers_release,
6539 .splice_read = tracing_buffers_splice_read,
6540 .llseek = no_llseek,
2cadf913
SR
6541};
6542
6de58e62
SRRH
6543#endif /* CONFIG_TRACER_SNAPSHOT */
6544
2cadf913
SR
6545static int tracing_buffers_open(struct inode *inode, struct file *filp)
6546{
46ef2be0 6547 struct trace_array *tr = inode->i_private;
2cadf913 6548 struct ftrace_buffer_info *info;
7b85af63 6549 int ret;
2cadf913
SR
6550
6551 if (tracing_disabled)
6552 return -ENODEV;
6553
7b85af63
SRRH
6554 if (trace_array_get(tr) < 0)
6555 return -ENODEV;
6556
2cadf913 6557 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
6558 if (!info) {
6559 trace_array_put(tr);
2cadf913 6560 return -ENOMEM;
7b85af63 6561 }
2cadf913 6562
a695cb58
SRRH
6563 mutex_lock(&trace_types_lock);
6564
cc60cdc9 6565 info->iter.tr = tr;
46ef2be0 6566 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 6567 info->iter.trace = tr->current_trace;
12883efb 6568 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 6569 info->spare = NULL;
2cadf913 6570 /* Force reading ring buffer for first read */
cc60cdc9 6571 info->read = (unsigned int)-1;
2cadf913
SR
6572
6573 filp->private_data = info;
6574
cf6ab6d9
SRRH
6575 tr->current_trace->ref++;
6576
a695cb58
SRRH
6577 mutex_unlock(&trace_types_lock);
6578
7b85af63
SRRH
6579 ret = nonseekable_open(inode, filp);
6580 if (ret < 0)
6581 trace_array_put(tr);
6582
6583 return ret;
2cadf913
SR
6584}
6585
cc60cdc9
SR
6586static unsigned int
6587tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6588{
6589 struct ftrace_buffer_info *info = filp->private_data;
6590 struct trace_iterator *iter = &info->iter;
6591
6592 return trace_poll(iter, filp, poll_table);
6593}
6594
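/*
 * Read handler for "per_cpu/cpuN/trace_pipe_raw".  Data is handed out
 * in whole ring-buffer pages: a spare page is taken from the ring
 * buffer with ring_buffer_alloc_read_page() and refilled via
 * ring_buffer_read_page() once its previous contents have been fully
 * copied to user space.
 */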
2cadf913
SR
6595static ssize_t
6596tracing_buffers_read(struct file *filp, char __user *ubuf,
6597 size_t count, loff_t *ppos)
6598{
6599 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 6600 struct trace_iterator *iter = &info->iter;
2cadf913 6601 ssize_t ret;
6de58e62 6602 ssize_t size;
2cadf913 6603
2dc5d12b
SR
6604 if (!count)
6605 return 0;
6606
6de58e62 6607#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
6608 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6609 return -EBUSY;
6de58e62
SRRH
6610#endif
6611
73a757e6 6612 if (!info->spare) {
12883efb
SRRH
6613 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6614 iter->cpu_file);
73a757e6
SRV
6615 info->spare_cpu = iter->cpu_file;
6616 }
ddd538f3 6617 if (!info->spare)
d716ff71 6618 return -ENOMEM;
ddd538f3 6619
2cadf913
SR
6620 /* Do we have previous read data to read? */
6621 if (info->read < PAGE_SIZE)
6622 goto read;
6623
b627344f 6624 again:
cc60cdc9 6625 trace_access_lock(iter->cpu_file);
12883efb 6626 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
6627 &info->spare,
6628 count,
cc60cdc9
SR
6629 iter->cpu_file, 0);
6630 trace_access_unlock(iter->cpu_file);
2cadf913 6631
b627344f
SR
6632 if (ret < 0) {
6633 if (trace_empty(iter)) {
d716ff71
SRRH
6634 if ((filp->f_flags & O_NONBLOCK))
6635 return -EAGAIN;
6636
e30f53aa 6637 ret = wait_on_pipe(iter, false);
d716ff71
SRRH
6638 if (ret)
6639 return ret;
6640
b627344f
SR
6641 goto again;
6642 }
d716ff71 6643 return 0;
b627344f 6644 }
436fc280 6645
436fc280 6646 info->read = 0;
b627344f 6647 read:
2cadf913
SR
6648 size = PAGE_SIZE - info->read;
6649 if (size > count)
6650 size = count;
6651
6652 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
6653 if (ret == size)
6654 return -EFAULT;
6655
2dc5d12b
SR
6656 size -= ret;
6657
2cadf913
SR
6658 *ppos += size;
6659 info->read += size;
6660
6661 return size;
6662}
6663
6664static int tracing_buffers_release(struct inode *inode, struct file *file)
6665{
6666 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 6667 struct trace_iterator *iter = &info->iter;
2cadf913 6668
a695cb58
SRRH
6669 mutex_lock(&trace_types_lock);
6670
cf6ab6d9
SRRH
6671 iter->tr->current_trace->ref--;
6672
ff451961 6673 __trace_array_put(iter->tr);
2cadf913 6674
ddd538f3 6675 if (info->spare)
73a757e6
SRV
6676 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6677 info->spare_cpu, info->spare);
2cadf913
SR
6678 kfree(info);
6679
a695cb58
SRRH
6680 mutex_unlock(&trace_types_lock);
6681
2cadf913
SR
6682 return 0;
6683}
6684
6685struct buffer_ref {
6686 struct ring_buffer *buffer;
6687 void *page;
73a757e6 6688 int cpu;
2cadf913
SR
6689 int ref;
6690};
6691
6692static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6693 struct pipe_buffer *buf)
6694{
6695 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6696
6697 if (--ref->ref)
6698 return;
6699
73a757e6 6700 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2cadf913
SR
6701 kfree(ref);
6702 buf->private = 0;
6703}
6704
2cadf913
SR
6705static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6706 struct pipe_buffer *buf)
6707{
6708 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6709
6710 ref->ref++;
6711}
6712
6713/* Pipe buffer operations for a buffer. */
28dfef8f 6714static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 6715 .can_merge = 0,
2cadf913
SR
6716 .confirm = generic_pipe_buf_confirm,
6717 .release = buffer_pipe_buf_release,
d55cb6cf 6718 .steal = generic_pipe_buf_steal,
2cadf913
SR
6719 .get = buffer_pipe_buf_get,
6720};
6721
6722/*
6723 * Callback from splice_to_pipe(); releases pages left at the end of
6724 * the spd in case we errored out while filling the pipe.
6725 */
6726static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6727{
6728 struct buffer_ref *ref =
6729 (struct buffer_ref *)spd->partial[i].private;
6730
6731 if (--ref->ref)
6732 return;
6733
73a757e6 6734 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2cadf913
SR
6735 kfree(ref);
6736 spd->partial[i].private = 0;
6737}
6738
6739static ssize_t
6740tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6741 struct pipe_inode_info *pipe, size_t len,
6742 unsigned int flags)
6743{
6744 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 6745 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
6746 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6747 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 6748 struct splice_pipe_desc spd = {
35f3d14d
JA
6749 .pages = pages_def,
6750 .partial = partial_def,
047fe360 6751 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
6752 .ops = &buffer_pipe_buf_ops,
6753 .spd_release = buffer_spd_release,
6754 };
6755 struct buffer_ref *ref;
93459c6c 6756 int entries, size, i;
07906da7 6757 ssize_t ret = 0;
2cadf913 6758
6de58e62 6759#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
6760 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6761 return -EBUSY;
6de58e62
SRRH
6762#endif
6763
d716ff71
SRRH
6764 if (*ppos & (PAGE_SIZE - 1))
6765 return -EINVAL;
93cfb3c9
LJ
6766
6767 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
6768 if (len < PAGE_SIZE)
6769 return -EINVAL;
93cfb3c9
LJ
6770 len &= PAGE_MASK;
6771 }
6772
1ae2293d
AV
6773 if (splice_grow_spd(pipe, &spd))
6774 return -ENOMEM;
6775
cc60cdc9
SR
6776 again:
6777 trace_access_lock(iter->cpu_file);
12883efb 6778 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 6779
a786c06d 6780 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
6781 struct page *page;
6782 int r;
6783
6784 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
6785 if (!ref) {
6786 ret = -ENOMEM;
2cadf913 6787 break;
07906da7 6788 }
2cadf913 6789
7267fa68 6790 ref->ref = 1;
12883efb 6791 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 6792 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2cadf913 6793 if (!ref->page) {
07906da7 6794 ret = -ENOMEM;
2cadf913
SR
6795 kfree(ref);
6796 break;
6797 }
73a757e6 6798 ref->cpu = iter->cpu_file;
2cadf913
SR
6799
6800 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 6801 len, iter->cpu_file, 1);
2cadf913 6802 if (r < 0) {
73a757e6
SRV
6803 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6804 ref->page);
2cadf913
SR
6805 kfree(ref);
6806 break;
6807 }
6808
6809 /*
6810 * Zero out any leftover data; this is going to
6811 * user land.
6812 */
6813 size = ring_buffer_page_len(ref->page);
6814 if (size < PAGE_SIZE)
6815 memset(ref->page + size, 0, PAGE_SIZE - size);
6816
6817 page = virt_to_page(ref->page);
6818
6819 spd.pages[i] = page;
6820 spd.partial[i].len = PAGE_SIZE;
6821 spd.partial[i].offset = 0;
6822 spd.partial[i].private = (unsigned long)ref;
6823 spd.nr_pages++;
93cfb3c9 6824 *ppos += PAGE_SIZE;
93459c6c 6825
12883efb 6826 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
6827 }
6828
cc60cdc9 6829 trace_access_unlock(iter->cpu_file);
2cadf913
SR
6830 spd.nr_pages = i;
6831
6832 /* did we read anything? */
6833 if (!spd.nr_pages) {
07906da7 6834 if (ret)
1ae2293d 6835 goto out;
d716ff71 6836
1ae2293d 6837 ret = -EAGAIN;
d716ff71 6838 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
1ae2293d 6839 goto out;
07906da7 6840
e30f53aa 6841 ret = wait_on_pipe(iter, true);
8b8b3683 6842 if (ret)
1ae2293d 6843 goto out;
e30f53aa 6844
cc60cdc9 6845 goto again;
2cadf913
SR
6846 }
6847
6848 ret = splice_to_pipe(pipe, &spd);
1ae2293d 6849out:
047fe360 6850 splice_shrink_spd(&spd);
6de58e62 6851
2cadf913
SR
6852 return ret;
6853}
6854
6855static const struct file_operations tracing_buffers_fops = {
6856 .open = tracing_buffers_open,
6857 .read = tracing_buffers_read,
cc60cdc9 6858 .poll = tracing_buffers_poll,
2cadf913
SR
6859 .release = tracing_buffers_release,
6860 .splice_read = tracing_buffers_splice_read,
6861 .llseek = no_llseek,
6862};
6863
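/*
 * "per_cpu/cpuN/stats" dumps per-CPU ring buffer statistics as
 * "name: value" lines: entries, overrun, commit overrun, bytes, oldest
 * event ts, now ts, dropped events and read events.  The timestamps
 * are printed as seconds.microseconds only when the current trace
 * clock counts in nanoseconds.
 */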
c8d77183
SR
6864static ssize_t
6865tracing_stats_read(struct file *filp, char __user *ubuf,
6866 size_t count, loff_t *ppos)
6867{
4d3435b8
ON
6868 struct inode *inode = file_inode(filp);
6869 struct trace_array *tr = inode->i_private;
12883efb 6870 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 6871 int cpu = tracing_get_cpu(inode);
c8d77183
SR
6872 struct trace_seq *s;
6873 unsigned long cnt;
c64e148a
VN
6874 unsigned long long t;
6875 unsigned long usec_rem;
c8d77183 6876
e4f2d10f 6877 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 6878 if (!s)
a646365c 6879 return -ENOMEM;
c8d77183
SR
6880
6881 trace_seq_init(s);
6882
12883efb 6883 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6884 trace_seq_printf(s, "entries: %ld\n", cnt);
6885
12883efb 6886 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6887 trace_seq_printf(s, "overrun: %ld\n", cnt);
6888
12883efb 6889 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6890 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6891
12883efb 6892 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
6893 trace_seq_printf(s, "bytes: %ld\n", cnt);
6894
58e8eedf 6895 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 6896 /* local or global for trace_clock */
12883efb 6897 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
6898 usec_rem = do_div(t, USEC_PER_SEC);
6899 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6900 t, usec_rem);
6901
12883efb 6902 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
6903 usec_rem = do_div(t, USEC_PER_SEC);
6904 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6905 } else {
6906 /* counter or tsc mode for trace_clock */
6907 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 6908 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 6909
11043d8b 6910 trace_seq_printf(s, "now ts: %llu\n",
12883efb 6911 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 6912 }
c64e148a 6913
12883efb 6914 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
6915 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6916
12883efb 6917 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
6918 trace_seq_printf(s, "read events: %ld\n", cnt);
6919
5ac48378
SRRH
6920 count = simple_read_from_buffer(ubuf, count, ppos,
6921 s->buffer, trace_seq_used(s));
c8d77183
SR
6922
6923 kfree(s);
6924
6925 return count;
6926}
6927
6928static const struct file_operations tracing_stats_fops = {
4d3435b8 6929 .open = tracing_open_generic_tr,
c8d77183 6930 .read = tracing_stats_read,
b444786f 6931 .llseek = generic_file_llseek,
4d3435b8 6932 .release = tracing_release_generic_tr,
c8d77183
SR
6933};
6934
bc0c38d1
SR
6935#ifdef CONFIG_DYNAMIC_FTRACE
6936
6937static ssize_t
b807c3d0 6938tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
6939 size_t cnt, loff_t *ppos)
6940{
6941 unsigned long *p = filp->private_data;
6a9c981b 6942 char buf[64]; /* Not too big for a shallow stack */
bc0c38d1
SR
6943 int r;
6944
6a9c981b 6945 r = scnprintf(buf, 63, "%ld", *p);
b807c3d0
SR
6946 buf[r++] = '\n';
6947
6a9c981b 6948 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
6949}
6950
5e2336a0 6951static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 6952 .open = tracing_open_generic,
b807c3d0 6953 .read = tracing_read_dyn_info,
b444786f 6954 .llseek = generic_file_llseek,
bc0c38d1 6955};
77fd5c15 6956#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 6957
77fd5c15
SRRH
6958#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6959static void
bca6c8d0 6960ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
b5f081b5 6961 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 6962 void *data)
77fd5c15 6963{
cab50379 6964 tracing_snapshot_instance(tr);
77fd5c15 6965}
bc0c38d1 6966
77fd5c15 6967static void
bca6c8d0 6968ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
b5f081b5 6969 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 6970 void *data)
bc0c38d1 6971{
6e444319 6972 struct ftrace_func_mapper *mapper = data;
1a93f8bd 6973 long *count = NULL;
77fd5c15 6974
1a93f8bd
SRV
6975 if (mapper)
6976 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
6977
6978 if (count) {
6979
6980 if (*count <= 0)
6981 return;
bc0c38d1 6982
77fd5c15 6983 (*count)--;
1a93f8bd 6984 }
77fd5c15 6985
cab50379 6986 tracing_snapshot_instance(tr);
77fd5c15
SRRH
6987}
6988
6989static int
6990ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6991 struct ftrace_probe_ops *ops, void *data)
6992{
6e444319 6993 struct ftrace_func_mapper *mapper = data;
1a93f8bd 6994 long *count = NULL;
77fd5c15
SRRH
6995
6996 seq_printf(m, "%ps:", (void *)ip);
6997
fa6f0cc7 6998 seq_puts(m, "snapshot");
77fd5c15 6999
1a93f8bd
SRV
7000 if (mapper)
7001 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7002
7003 if (count)
7004 seq_printf(m, ":count=%ld\n", *count);
77fd5c15 7005 else
1a93f8bd 7006 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
7007
7008 return 0;
7009}
7010
1a93f8bd 7011static int
b5f081b5 7012ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 7013 unsigned long ip, void *init_data, void **data)
1a93f8bd 7014{
6e444319
SRV
7015 struct ftrace_func_mapper *mapper = *data;
7016
7017 if (!mapper) {
7018 mapper = allocate_ftrace_func_mapper();
7019 if (!mapper)
7020 return -ENOMEM;
7021 *data = mapper;
7022 }
1a93f8bd 7023
6e444319 7024 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
1a93f8bd
SRV
7025}
7026
7027static void
b5f081b5 7028ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 7029 unsigned long ip, void *data)
1a93f8bd 7030{
6e444319
SRV
7031 struct ftrace_func_mapper *mapper = data;
7032
7033 if (!ip) {
7034 if (!mapper)
7035 return;
7036 free_ftrace_func_mapper(mapper, NULL);
7037 return;
7038 }
1a93f8bd
SRV
7039
7040 ftrace_func_mapper_remove_ip(mapper, ip);
7041}
7042
77fd5c15
SRRH
7043static struct ftrace_probe_ops snapshot_probe_ops = {
7044 .func = ftrace_snapshot,
7045 .print = ftrace_snapshot_print,
7046};
7047
7048static struct ftrace_probe_ops snapshot_count_probe_ops = {
7049 .func = ftrace_count_snapshot,
7050 .print = ftrace_snapshot_print,
1a93f8bd
SRV
7051 .init = ftrace_snapshot_init,
7052 .free = ftrace_snapshot_free,
77fd5c15
SRRH
7053};
7054
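/*
 * Implements the "snapshot" command of set_ftrace_filter, which arms
 * the probes above on the matched functions.  For example (the
 * function name is only an illustration):
 *
 *   echo 'do_page_fault:snapshot' > set_ftrace_filter    # every hit
 *   echo 'do_page_fault:snapshot:5' > set_ftrace_filter  # first 5 hits
 *   echo '!do_page_fault:snapshot' > set_ftrace_filter   # remove probe
 *
 * Each hit snapshots the trace buffer via tracing_snapshot_instance().
 */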
7055static int
04ec7bb6 7056ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
77fd5c15
SRRH
7057 char *glob, char *cmd, char *param, int enable)
7058{
7059 struct ftrace_probe_ops *ops;
7060 void *count = (void *)-1;
7061 char *number;
7062 int ret;
7063
0f179765
SRV
7064 if (!tr)
7065 return -ENODEV;
7066
77fd5c15
SRRH
7067 /* hash funcs only work with set_ftrace_filter */
7068 if (!enable)
7069 return -EINVAL;
7070
7071 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7072
d3d532d7 7073 if (glob[0] == '!')
7b60f3d8 7074 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
77fd5c15
SRRH
7075
7076 if (!param)
7077 goto out_reg;
7078
7079 number = strsep(&param, ":");
7080
7081 if (!strlen(number))
7082 goto out_reg;
7083
7084 /*
7085 * We use the callback data field (which is a pointer)
7086 * as our counter.
7087 */
7088 ret = kstrtoul(number, 0, (unsigned long *)&count);
7089 if (ret)
7090 return ret;
7091
7092 out_reg:
4c174688 7093 ret = alloc_snapshot(tr);
df62db5b
SRV
7094 if (ret < 0)
7095 goto out;
77fd5c15 7096
4c174688 7097 ret = register_ftrace_function_probe(glob, tr, ops, count);
77fd5c15 7098
df62db5b 7099 out:
77fd5c15
SRRH
7100 return ret < 0 ? ret : 0;
7101}
7102
7103static struct ftrace_func_command ftrace_snapshot_cmd = {
7104 .name = "snapshot",
7105 .func = ftrace_trace_snapshot_callback,
7106};
7107
38de93ab 7108static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
7109{
7110 return register_ftrace_command(&ftrace_snapshot_cmd);
7111}
7112#else
38de93ab 7113static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 7114#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 7115
7eeafbca 7116static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 7117{
8434dc93
SRRH
7118 if (WARN_ON(!tr->dir))
7119 return ERR_PTR(-ENODEV);
7120
7121 /* Top directory uses NULL as the parent */
7122 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7123 return NULL;
7124
7125 /* All sub buffers have a descriptor */
2b6080f2 7126 return tr->dir;
bc0c38d1
SR
7127}
7128
2b6080f2 7129static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 7130{
b04cc6b1
FW
7131 struct dentry *d_tracer;
7132
2b6080f2
SR
7133 if (tr->percpu_dir)
7134 return tr->percpu_dir;
b04cc6b1 7135
7eeafbca 7136 d_tracer = tracing_get_dentry(tr);
14a5ae40 7137 if (IS_ERR(d_tracer))
b04cc6b1
FW
7138 return NULL;
7139
8434dc93 7140 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 7141
2b6080f2 7142 WARN_ONCE(!tr->percpu_dir,
8434dc93 7143 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 7144
2b6080f2 7145 return tr->percpu_dir;
b04cc6b1
FW
7146}
7147
649e9c70
ON
7148static struct dentry *
7149trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7150 void *data, long cpu, const struct file_operations *fops)
7151{
7152 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7153
7154 if (ret) /* See tracing_get_cpu() */
7682c918 7155 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
7156 return ret;
7157}
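/*
 * The helper above stashes the cpu number in the inode's i_cdev field,
 * offset by one so that cpu 0 can be told apart from a NULL (no-cpu)
 * value; the matching tracing_get_cpu() helper subtracts the one again
 * when the per-cpu file is opened.
 */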
7158
2b6080f2 7159static void
8434dc93 7160tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 7161{
2b6080f2 7162 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 7163 struct dentry *d_cpu;
dd49a38c 7164 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 7165
0a3d7ce7
NK
7166 if (!d_percpu)
7167 return;
7168
dd49a38c 7169 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 7170 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 7171 if (!d_cpu) {
a395d6a7 7172 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
7173 return;
7174 }
b04cc6b1 7175
8656e7a2 7176 /* per cpu trace_pipe */
649e9c70 7177 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 7178 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
7179
7180 /* per cpu trace */
649e9c70 7181 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 7182 tr, cpu, &tracing_fops);
7f96f93f 7183
649e9c70 7184 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 7185 tr, cpu, &tracing_buffers_fops);
7f96f93f 7186
649e9c70 7187 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 7188 tr, cpu, &tracing_stats_fops);
438ced17 7189
649e9c70 7190 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 7191 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
7192
7193#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 7194 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 7195 tr, cpu, &snapshot_fops);
6de58e62 7196
649e9c70 7197 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 7198 tr, cpu, &snapshot_raw_fops);
f1affcaa 7199#endif
b04cc6b1
FW
7200}
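/*
 * Resulting per-cpu layout (as created above), e.g. for cpu 1:
 *
 *   per_cpu/cpu1/trace            per_cpu/cpu1/stats
 *   per_cpu/cpu1/trace_pipe       per_cpu/cpu1/buffer_size_kb
 *   per_cpu/cpu1/trace_pipe_raw   per_cpu/cpu1/snapshot      (if enabled)
 *                                 per_cpu/cpu1/snapshot_raw  (if enabled)
 *
 * Each file operates only on that CPU's part of the ring buffer.
 */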
7201
60a11774
SR
7202#ifdef CONFIG_FTRACE_SELFTEST
7203/* Let selftest have access to static functions in this file */
7204#include "trace_selftest.c"
7205#endif
7206
577b785f
SR
7207static ssize_t
7208trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7209 loff_t *ppos)
7210{
7211 struct trace_option_dentry *topt = filp->private_data;
7212 char *buf;
7213
7214 if (topt->flags->val & topt->opt->bit)
7215 buf = "1\n";
7216 else
7217 buf = "0\n";
7218
7219 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7220}
7221
7222static ssize_t
7223trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7224 loff_t *ppos)
7225{
7226 struct trace_option_dentry *topt = filp->private_data;
7227 unsigned long val;
577b785f
SR
7228 int ret;
7229
22fe9b54
PH
7230 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7231 if (ret)
577b785f
SR
7232 return ret;
7233
8d18eaaf
LZ
7234 if (val != 0 && val != 1)
7235 return -EINVAL;
577b785f 7236
8d18eaaf 7237 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 7238 mutex_lock(&trace_types_lock);
8c1a49ae 7239 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 7240 topt->opt, !val);
577b785f
SR
7241 mutex_unlock(&trace_types_lock);
7242 if (ret)
7243 return ret;
577b785f
SR
7244 }
7245
7246 *ppos += cnt;
7247
7248 return cnt;
7249}
7250
7251
7252static const struct file_operations trace_options_fops = {
7253 .open = tracing_open_generic,
7254 .read = trace_options_read,
7255 .write = trace_options_write,
b444786f 7256 .llseek = generic_file_llseek,
577b785f
SR
7257};
7258
9a38a885
SRRH
7259/*
7260 * In order to pass in both the trace_array descriptor as well as the index
7261 * to the flag that the trace option file represents, the trace_array
7262 * has a character array of trace_flags_index[], which holds the index
7263 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7264 * The address of this character array is passed to the flag option file
7265 * read/write callbacks.
7266 *
7267 * In order to extract both the index and the trace_array descriptor,
7268 * get_tr_index() uses the following algorithm.
7269 *
7270 * idx = *ptr;
7271 *
 7272 * As the pointer itself holds the address of that index entry
 7273 * (remember index[1] == 1), dereferencing it gives us the index.
 7274 *
 7275 * Then, to get the trace_array descriptor, we subtract that index
 7276 * from the ptr, which takes us back to the start of the index array:
7277 *
7278 * ptr - idx == &index[0]
7279 *
7280 * Then a simple container_of() from that pointer gets us to the
7281 * trace_array descriptor.
7282 */
7283static void get_tr_index(void *data, struct trace_array **ptr,
7284 unsigned int *pindex)
7285{
7286 *pindex = *(unsigned char *)data;
7287
7288 *ptr = container_of(data - *pindex, struct trace_array,
7289 trace_flags_index);
7290}
7291
a8259075
SR
7292static ssize_t
7293trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7294 loff_t *ppos)
7295{
9a38a885
SRRH
7296 void *tr_index = filp->private_data;
7297 struct trace_array *tr;
7298 unsigned int index;
a8259075
SR
7299 char *buf;
7300
9a38a885
SRRH
7301 get_tr_index(tr_index, &tr, &index);
7302
7303 if (tr->trace_flags & (1 << index))
a8259075
SR
7304 buf = "1\n";
7305 else
7306 buf = "0\n";
7307
7308 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7309}
7310
7311static ssize_t
7312trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7313 loff_t *ppos)
7314{
9a38a885
SRRH
7315 void *tr_index = filp->private_data;
7316 struct trace_array *tr;
7317 unsigned int index;
a8259075
SR
7318 unsigned long val;
7319 int ret;
7320
9a38a885
SRRH
7321 get_tr_index(tr_index, &tr, &index);
7322
22fe9b54
PH
7323 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7324 if (ret)
a8259075
SR
7325 return ret;
7326
f2d84b65 7327 if (val != 0 && val != 1)
a8259075 7328 return -EINVAL;
69d34da2
SRRH
7329
7330 mutex_lock(&trace_types_lock);
2b6080f2 7331 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 7332 mutex_unlock(&trace_types_lock);
a8259075 7333
613f04a0
SRRH
7334 if (ret < 0)
7335 return ret;
7336
a8259075
SR
7337 *ppos += cnt;
7338
7339 return cnt;
7340}
7341
a8259075
SR
7342static const struct file_operations trace_options_core_fops = {
7343 .open = tracing_open_generic,
7344 .read = trace_options_core_read,
7345 .write = trace_options_core_write,
b444786f 7346 .llseek = generic_file_llseek,
a8259075
SR
7347};
7348
5452af66 7349struct dentry *trace_create_file(const char *name,
f4ae40a6 7350 umode_t mode,
5452af66
FW
7351 struct dentry *parent,
7352 void *data,
7353 const struct file_operations *fops)
7354{
7355 struct dentry *ret;
7356
8434dc93 7357 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 7358 if (!ret)
a395d6a7 7359 pr_warn("Could not create tracefs '%s' entry\n", name);
5452af66
FW
7360
7361 return ret;
7362}
7363
7364
2b6080f2 7365static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
7366{
7367 struct dentry *d_tracer;
a8259075 7368
2b6080f2
SR
7369 if (tr->options)
7370 return tr->options;
a8259075 7371
7eeafbca 7372 d_tracer = tracing_get_dentry(tr);
14a5ae40 7373 if (IS_ERR(d_tracer))
a8259075
SR
7374 return NULL;
7375
8434dc93 7376 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 7377 if (!tr->options) {
a395d6a7 7378 pr_warn("Could not create tracefs directory 'options'\n");
a8259075
SR
7379 return NULL;
7380 }
7381
2b6080f2 7382 return tr->options;
a8259075
SR
7383}
7384
577b785f 7385static void
2b6080f2
SR
7386create_trace_option_file(struct trace_array *tr,
7387 struct trace_option_dentry *topt,
577b785f
SR
7388 struct tracer_flags *flags,
7389 struct tracer_opt *opt)
7390{
7391 struct dentry *t_options;
577b785f 7392
2b6080f2 7393 t_options = trace_options_init_dentry(tr);
577b785f
SR
7394 if (!t_options)
7395 return;
7396
7397 topt->flags = flags;
7398 topt->opt = opt;
2b6080f2 7399 topt->tr = tr;
577b785f 7400
5452af66 7401 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
7402 &trace_options_fops);
7403
577b785f
SR
7404}
7405
37aea98b 7406static void
2b6080f2 7407create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
7408{
7409 struct trace_option_dentry *topts;
37aea98b 7410 struct trace_options *tr_topts;
577b785f
SR
7411 struct tracer_flags *flags;
7412 struct tracer_opt *opts;
7413 int cnt;
37aea98b 7414 int i;
577b785f
SR
7415
7416 if (!tracer)
37aea98b 7417 return;
577b785f
SR
7418
7419 flags = tracer->flags;
7420
7421 if (!flags || !flags->opts)
37aea98b
SRRH
7422 return;
7423
7424 /*
7425 * If this is an instance, only create flags for tracers
7426 * the instance may have.
7427 */
7428 if (!trace_ok_for_array(tracer, tr))
7429 return;
7430
7431 for (i = 0; i < tr->nr_topts; i++) {
d39cdd20
CH
 7432 /* Make sure there are no duplicate flags. */
7433 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
37aea98b
SRRH
7434 return;
7435 }
577b785f
SR
7436
7437 opts = flags->opts;
7438
7439 for (cnt = 0; opts[cnt].name; cnt++)
7440 ;
7441
0cfe8245 7442 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f 7443 if (!topts)
37aea98b
SRRH
7444 return;
7445
7446 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7447 GFP_KERNEL);
7448 if (!tr_topts) {
7449 kfree(topts);
7450 return;
7451 }
7452
7453 tr->topts = tr_topts;
7454 tr->topts[tr->nr_topts].tracer = tracer;
7455 tr->topts[tr->nr_topts].topts = topts;
7456 tr->nr_topts++;
577b785f 7457
41d9c0be 7458 for (cnt = 0; opts[cnt].name; cnt++) {
2b6080f2 7459 create_trace_option_file(tr, &topts[cnt], flags,
577b785f 7460 &opts[cnt]);
41d9c0be
SRRH
7461 WARN_ONCE(topts[cnt].entry == NULL,
7462 "Failed to create trace option: %s",
7463 opts[cnt].name);
7464 }
577b785f
SR
7465}
7466
a8259075 7467static struct dentry *
2b6080f2
SR
7468create_trace_option_core_file(struct trace_array *tr,
7469 const char *option, long index)
a8259075
SR
7470{
7471 struct dentry *t_options;
a8259075 7472
2b6080f2 7473 t_options = trace_options_init_dentry(tr);
a8259075
SR
7474 if (!t_options)
7475 return NULL;
7476
9a38a885
SRRH
7477 return trace_create_file(option, 0644, t_options,
7478 (void *)&tr->trace_flags_index[index],
7479 &trace_options_core_fops);
a8259075
SR
7480}
7481
16270145 7482static void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
7483{
7484 struct dentry *t_options;
16270145 7485 bool top_level = tr == &global_trace;
a8259075
SR
7486 int i;
7487
2b6080f2 7488 t_options = trace_options_init_dentry(tr);
a8259075
SR
7489 if (!t_options)
7490 return;
7491
16270145
SRRH
7492 for (i = 0; trace_options[i]; i++) {
7493 if (top_level ||
7494 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7495 create_trace_option_core_file(tr, trace_options[i], i);
7496 }
a8259075
SR
7497}
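/*
 * The options directory built above ends up with one file per trace
 * flag (for example "options/sym-offset" or "options/stacktrace"; the
 * names come from the trace_options[] table), each accepting "0" or
 * "1" to clear or set the corresponding bit in tr->trace_flags.
 */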
7498
499e5470
SR
7499static ssize_t
7500rb_simple_read(struct file *filp, char __user *ubuf,
7501 size_t cnt, loff_t *ppos)
7502{
348f0fc2 7503 struct trace_array *tr = filp->private_data;
499e5470
SR
7504 char buf[64];
7505 int r;
7506
10246fa3 7507 r = tracer_tracing_is_on(tr);
499e5470
SR
7508 r = sprintf(buf, "%d\n", r);
7509
7510 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7511}
7512
7513static ssize_t
7514rb_simple_write(struct file *filp, const char __user *ubuf,
7515 size_t cnt, loff_t *ppos)
7516{
348f0fc2 7517 struct trace_array *tr = filp->private_data;
12883efb 7518 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
7519 unsigned long val;
7520 int ret;
7521
7522 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7523 if (ret)
7524 return ret;
7525
7526 if (buffer) {
2df8f8a6
SR
7527 mutex_lock(&trace_types_lock);
7528 if (val) {
10246fa3 7529 tracer_tracing_on(tr);
2b6080f2
SR
7530 if (tr->current_trace->start)
7531 tr->current_trace->start(tr);
2df8f8a6 7532 } else {
10246fa3 7533 tracer_tracing_off(tr);
2b6080f2
SR
7534 if (tr->current_trace->stop)
7535 tr->current_trace->stop(tr);
2df8f8a6
SR
7536 }
7537 mutex_unlock(&trace_types_lock);
499e5470
SR
7538 }
7539
7540 (*ppos)++;
7541
7542 return cnt;
7543}
7544
7545static const struct file_operations rb_simple_fops = {
7b85af63 7546 .open = tracing_open_generic_tr,
499e5470
SR
7547 .read = rb_simple_read,
7548 .write = rb_simple_write,
7b85af63 7549 .release = tracing_release_generic_tr,
499e5470
SR
7550 .llseek = default_llseek,
7551};
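/*
 * rb_simple_fops backs the per-instance "tracing_on" file created in
 * init_tracer_tracefs() below: writing 0 stops recording into the ring
 * buffer (and calls the current tracer's ->stop()), writing a non-zero
 * value turns recording back on, and reading reports the current state.
 */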
7552
277ba044
SR
7553struct dentry *trace_instance_dir;
7554
7555static void
8434dc93 7556init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 7557
55034cd6
SRRH
7558static int
7559allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
7560{
7561 enum ring_buffer_flags rb_flags;
737223fb 7562
983f938a 7563 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
737223fb 7564
dced341b
SRRH
7565 buf->tr = tr;
7566
55034cd6
SRRH
7567 buf->buffer = ring_buffer_alloc(size, rb_flags);
7568 if (!buf->buffer)
7569 return -ENOMEM;
737223fb 7570
55034cd6
SRRH
7571 buf->data = alloc_percpu(struct trace_array_cpu);
7572 if (!buf->data) {
7573 ring_buffer_free(buf->buffer);
7574 return -ENOMEM;
7575 }
737223fb 7576
737223fb
SRRH
7577 /* Allocate the first page for all buffers */
7578 set_buffer_entries(&tr->trace_buffer,
7579 ring_buffer_size(tr->trace_buffer.buffer, 0));
7580
55034cd6
SRRH
7581 return 0;
7582}
737223fb 7583
55034cd6
SRRH
7584static int allocate_trace_buffers(struct trace_array *tr, int size)
7585{
7586 int ret;
737223fb 7587
55034cd6
SRRH
7588 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7589 if (ret)
7590 return ret;
737223fb 7591
55034cd6
SRRH
7592#ifdef CONFIG_TRACER_MAX_TRACE
7593 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7594 allocate_snapshot ? size : 1);
7595 if (WARN_ON(ret)) {
737223fb 7596 ring_buffer_free(tr->trace_buffer.buffer);
55034cd6
SRRH
7597 free_percpu(tr->trace_buffer.data);
7598 return -ENOMEM;
7599 }
7600 tr->allocated_snapshot = allocate_snapshot;
737223fb 7601
55034cd6
SRRH
7602 /*
7603 * Only the top level trace array gets its snapshot allocated
7604 * from the kernel command line.
7605 */
7606 allocate_snapshot = false;
737223fb 7607#endif
55034cd6 7608 return 0;
737223fb
SRRH
7609}
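/*
 * Note: when CONFIG_TRACER_MAX_TRACE is enabled but no snapshot was
 * requested on the kernel command line, the max/snapshot buffer above
 * is kept at a single page; it is only resized to match the main trace
 * buffer later (e.g. via alloc_snapshot()) when a snapshot is actually
 * used.
 */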
7610
f0b70cc4
SRRH
7611static void free_trace_buffer(struct trace_buffer *buf)
7612{
7613 if (buf->buffer) {
7614 ring_buffer_free(buf->buffer);
7615 buf->buffer = NULL;
7616 free_percpu(buf->data);
7617 buf->data = NULL;
7618 }
7619}
7620
23aaa3c1
SRRH
7621static void free_trace_buffers(struct trace_array *tr)
7622{
7623 if (!tr)
7624 return;
7625
f0b70cc4 7626 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
7627
7628#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 7629 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
7630#endif
7631}
7632
9a38a885
SRRH
7633static void init_trace_flags_index(struct trace_array *tr)
7634{
7635 int i;
7636
7637 /* Used by the trace options files */
7638 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7639 tr->trace_flags_index[i] = i;
7640}
7641
37aea98b
SRRH
7642static void __update_tracer_options(struct trace_array *tr)
7643{
7644 struct tracer *t;
7645
7646 for (t = trace_types; t; t = t->next)
7647 add_tracer_options(tr, t);
7648}
7649
7650static void update_tracer_options(struct trace_array *tr)
7651{
7652 mutex_lock(&trace_types_lock);
7653 __update_tracer_options(tr);
7654 mutex_unlock(&trace_types_lock);
7655}
7656
eae47358 7657static int instance_mkdir(const char *name)
737223fb 7658{
277ba044
SR
7659 struct trace_array *tr;
7660 int ret;
277ba044
SR
7661
7662 mutex_lock(&trace_types_lock);
7663
7664 ret = -EEXIST;
7665 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7666 if (tr->name && strcmp(tr->name, name) == 0)
7667 goto out_unlock;
7668 }
7669
7670 ret = -ENOMEM;
7671 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7672 if (!tr)
7673 goto out_unlock;
7674
7675 tr->name = kstrdup(name, GFP_KERNEL);
7676 if (!tr->name)
7677 goto out_free_tr;
7678
ccfe9e42
AL
7679 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7680 goto out_free_tr;
7681
20550622 7682 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
983f938a 7683
ccfe9e42
AL
7684 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7685
277ba044
SR
7686 raw_spin_lock_init(&tr->start_lock);
7687
0b9b12c1
SRRH
7688 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7689
277ba044
SR
7690 tr->current_trace = &nop_trace;
7691
7692 INIT_LIST_HEAD(&tr->systems);
7693 INIT_LIST_HEAD(&tr->events);
7694
737223fb 7695 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
7696 goto out_free_tr;
7697
8434dc93 7698 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
7699 if (!tr->dir)
7700 goto out_free_tr;
7701
7702 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 7703 if (ret) {
8434dc93 7704 tracefs_remove_recursive(tr->dir);
277ba044 7705 goto out_free_tr;
609e85a7 7706 }
277ba044 7707
04ec7bb6
SRV
7708 ftrace_init_trace_array(tr);
7709
8434dc93 7710 init_tracer_tracefs(tr, tr->dir);
9a38a885 7711 init_trace_flags_index(tr);
37aea98b 7712 __update_tracer_options(tr);
277ba044
SR
7713
7714 list_add(&tr->list, &ftrace_trace_arrays);
7715
7716 mutex_unlock(&trace_types_lock);
7717
7718 return 0;
7719
7720 out_free_tr:
23aaa3c1 7721 free_trace_buffers(tr);
ccfe9e42 7722 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
7723 kfree(tr->name);
7724 kfree(tr);
7725
7726 out_unlock:
7727 mutex_unlock(&trace_types_lock);
7728
7729 return ret;
7730
7731}
7732
eae47358 7733static int instance_rmdir(const char *name)
0c8916c3
SR
7734{
7735 struct trace_array *tr;
7736 int found = 0;
7737 int ret;
37aea98b 7738 int i;
0c8916c3
SR
7739
7740 mutex_lock(&trace_types_lock);
7741
7742 ret = -ENODEV;
7743 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7744 if (tr->name && strcmp(tr->name, name) == 0) {
7745 found = 1;
7746 break;
7747 }
7748 }
7749 if (!found)
7750 goto out_unlock;
7751
a695cb58 7752 ret = -EBUSY;
cf6ab6d9 7753 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
7754 goto out_unlock;
7755
0c8916c3
SR
7756 list_del(&tr->list);
7757
20550622
SRRH
7758 /* Disable all the flags that were enabled coming in */
7759 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7760 if ((1 << i) & ZEROED_TRACE_FLAGS)
7761 set_tracer_flag(tr, 1 << i, 0);
7762 }
7763
6b450d25 7764 tracing_set_nop(tr);
a0e6369e 7765 clear_ftrace_function_probes(tr);
0c8916c3 7766 event_trace_del_tracer(tr);
d879d0b8 7767 ftrace_clear_pids(tr);
591dffda 7768 ftrace_destroy_function_files(tr);
681a4a2f 7769 tracefs_remove_recursive(tr->dir);
a9fcaaac 7770 free_trace_buffers(tr);
0c8916c3 7771
37aea98b
SRRH
7772 for (i = 0; i < tr->nr_topts; i++) {
7773 kfree(tr->topts[i].topts);
7774 }
7775 kfree(tr->topts);
7776
db9108e0 7777 free_cpumask_var(tr->tracing_cpumask);
0c8916c3
SR
7778 kfree(tr->name);
7779 kfree(tr);
7780
7781 ret = 0;
7782
7783 out_unlock:
7784 mutex_unlock(&trace_types_lock);
7785
7786 return ret;
7787}
7788
277ba044
SR
7789static __init void create_trace_instances(struct dentry *d_tracer)
7790{
eae47358
SRRH
7791 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7792 instance_mkdir,
7793 instance_rmdir);
277ba044
SR
7794 if (WARN_ON(!trace_instance_dir))
7795 return;
277ba044
SR
7796}
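/*
 * With the callbacks registered above, new trace instances can be
 * created and torn down from user space with plain mkdir/rmdir, e.g.
 * (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   mkdir /sys/kernel/tracing/instances/foo   # calls instance_mkdir("foo")
 *   rmdir /sys/kernel/tracing/instances/foo   # calls instance_rmdir("foo")
 *
 * Each instance gets its own ring buffer, trace options and event
 * directory, as set up in instance_mkdir().
 */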
7797
2b6080f2 7798static void
8434dc93 7799init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 7800{
121aaee7 7801 int cpu;
2b6080f2 7802
607e2ea1
SRRH
7803 trace_create_file("available_tracers", 0444, d_tracer,
7804 tr, &show_traces_fops);
7805
7806 trace_create_file("current_tracer", 0644, d_tracer,
7807 tr, &set_tracer_fops);
7808
ccfe9e42
AL
7809 trace_create_file("tracing_cpumask", 0644, d_tracer,
7810 tr, &tracing_cpumask_fops);
7811
2b6080f2
SR
7812 trace_create_file("trace_options", 0644, d_tracer,
7813 tr, &tracing_iter_fops);
7814
7815 trace_create_file("trace", 0644, d_tracer,
6484c71c 7816 tr, &tracing_fops);
2b6080f2
SR
7817
7818 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 7819 tr, &tracing_pipe_fops);
2b6080f2
SR
7820
7821 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 7822 tr, &tracing_entries_fops);
2b6080f2
SR
7823
7824 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7825 tr, &tracing_total_entries_fops);
7826
238ae93d 7827 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
7828 tr, &tracing_free_buffer_fops);
7829
7830 trace_create_file("trace_marker", 0220, d_tracer,
7831 tr, &tracing_mark_fops);
7832
fa32e855
SR
7833 trace_create_file("trace_marker_raw", 0220, d_tracer,
7834 tr, &tracing_mark_raw_fops);
7835
2b6080f2
SR
7836 trace_create_file("trace_clock", 0644, d_tracer, tr,
7837 &trace_clock_fops);
7838
7839 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 7840 tr, &rb_simple_fops);
ce9bae55 7841
16270145
SRRH
7842 create_trace_options_dir(tr);
7843
f971cc9a 7844#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6d9b3fa5
SRRH
7845 trace_create_file("tracing_max_latency", 0644, d_tracer,
7846 &tr->max_latency, &tracing_max_lat_fops);
7847#endif
7848
591dffda
SRRH
7849 if (ftrace_create_function_files(tr, d_tracer))
7850 WARN(1, "Could not allocate function filter files");
7851
ce9bae55
SRRH
7852#ifdef CONFIG_TRACER_SNAPSHOT
7853 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 7854 tr, &snapshot_fops);
ce9bae55 7855#endif
121aaee7
SRRH
7856
7857 for_each_tracing_cpu(cpu)
8434dc93 7858 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 7859
345ddcc8 7860 ftrace_init_tracefs(tr, d_tracer);
2b6080f2
SR
7861}
7862
93faccbb 7863static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
f76180bc
SRRH
7864{
7865 struct vfsmount *mnt;
7866 struct file_system_type *type;
7867
7868 /*
7869 * To maintain backward compatibility for tools that mount
7870 * debugfs to get to the tracing facility, tracefs is automatically
7871 * mounted to the debugfs/tracing directory.
7872 */
7873 type = get_fs_type("tracefs");
7874 if (!type)
7875 return NULL;
93faccbb 7876 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
f76180bc
SRRH
7877 put_filesystem(type);
7878 if (IS_ERR(mnt))
7879 return NULL;
7880 mntget(mnt);
7881
7882 return mnt;
7883}
7884
7eeafbca
SRRH
7885/**
7886 * tracing_init_dentry - initialize top level trace array
7887 *
 7888 * This is called when creating files or directories in the tracing
 7889 * directory. It is called via fs_initcall() by the boot-up code
 7890 * and is expected to return the dentry of the top level tracing directory.
7891 */
7892struct dentry *tracing_init_dentry(void)
7893{
7894 struct trace_array *tr = &global_trace;
7895
f76180bc 7896 /* The top level trace array uses NULL as parent */
7eeafbca 7897 if (tr->dir)
f76180bc 7898 return NULL;
7eeafbca 7899
8b129199
JW
7900 if (WARN_ON(!tracefs_initialized()) ||
7901 (IS_ENABLED(CONFIG_DEBUG_FS) &&
7902 WARN_ON(!debugfs_initialized())))
7eeafbca
SRRH
7903 return ERR_PTR(-ENODEV);
7904
f76180bc
SRRH
7905 /*
7906 * As there may still be users that expect the tracing
7907 * files to exist in debugfs/tracing, we must automount
7908 * the tracefs file system there, so older tools still
 7909 * work with the newer kernel.
7910 */
7911 tr->dir = debugfs_create_automount("tracing", NULL,
7912 trace_automount, NULL);
7eeafbca
SRRH
7913 if (!tr->dir) {
7914 pr_warn_once("Could not create debugfs directory 'tracing'\n");
7915 return ERR_PTR(-ENOMEM);
7916 }
7917
8434dc93 7918 return NULL;
7eeafbca
SRRH
7919}
7920
00f4b652
JL
7921extern struct trace_eval_map *__start_ftrace_eval_maps[];
7922extern struct trace_eval_map *__stop_ftrace_eval_maps[];
0c564a53 7923
5f60b351 7924static void __init trace_eval_init(void)
0c564a53 7925{
3673b8e4
SRRH
7926 int len;
7927
02fd7f68 7928 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
f57a4143 7929 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
3673b8e4
SRRH
7930}
7931
7932#ifdef CONFIG_MODULES
f57a4143 7933static void trace_module_add_evals(struct module *mod)
3673b8e4 7934{
99be647c 7935 if (!mod->num_trace_evals)
3673b8e4
SRRH
7936 return;
7937
7938 /*
 7939 * Modules with bad taint do not have events created; do
 7940 * not bother with their enums either.
7941 */
7942 if (trace_module_has_bad_taint(mod))
7943 return;
7944
f57a4143 7945 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
3673b8e4
SRRH
7946}
7947
681bec03 7948#ifdef CONFIG_TRACE_EVAL_MAP_FILE
f57a4143 7949static void trace_module_remove_evals(struct module *mod)
9828413d 7950{
23bf8cb8
JL
7951 union trace_eval_map_item *map;
7952 union trace_eval_map_item **last = &trace_eval_maps;
9828413d 7953
99be647c 7954 if (!mod->num_trace_evals)
9828413d
SRRH
7955 return;
7956
1793ed93 7957 mutex_lock(&trace_eval_mutex);
9828413d 7958
23bf8cb8 7959 map = trace_eval_maps;
9828413d
SRRH
7960
7961 while (map) {
7962 if (map->head.mod == mod)
7963 break;
5f60b351 7964 map = trace_eval_jmp_to_tail(map);
9828413d
SRRH
7965 last = &map->tail.next;
7966 map = map->tail.next;
7967 }
7968 if (!map)
7969 goto out;
7970
5f60b351 7971 *last = trace_eval_jmp_to_tail(map)->tail.next;
9828413d
SRRH
7972 kfree(map);
7973 out:
1793ed93 7974 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
7975}
7976#else
f57a4143 7977static inline void trace_module_remove_evals(struct module *mod) { }
681bec03 7978#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 7979
3673b8e4
SRRH
7980static int trace_module_notify(struct notifier_block *self,
7981 unsigned long val, void *data)
7982{
7983 struct module *mod = data;
7984
7985 switch (val) {
7986 case MODULE_STATE_COMING:
f57a4143 7987 trace_module_add_evals(mod);
3673b8e4 7988 break;
9828413d 7989 case MODULE_STATE_GOING:
f57a4143 7990 trace_module_remove_evals(mod);
9828413d 7991 break;
3673b8e4
SRRH
7992 }
7993
7994 return 0;
0c564a53
SRRH
7995}
7996
3673b8e4
SRRH
7997static struct notifier_block trace_module_nb = {
7998 .notifier_call = trace_module_notify,
7999 .priority = 0,
8000};
9828413d 8001#endif /* CONFIG_MODULES */
3673b8e4 8002
8434dc93 8003static __init int tracer_init_tracefs(void)
bc0c38d1
SR
8004{
8005 struct dentry *d_tracer;
bc0c38d1 8006
7e53bd42
LJ
8007 trace_access_lock_init();
8008
bc0c38d1 8009 d_tracer = tracing_init_dentry();
14a5ae40 8010 if (IS_ERR(d_tracer))
ed6f1c99 8011 return 0;
bc0c38d1 8012
8434dc93 8013 init_tracer_tracefs(&global_trace, d_tracer);
501c2375 8014 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
bc0c38d1 8015
5452af66 8016 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 8017 &global_trace, &tracing_thresh_fops);
a8259075 8018
339ae5d3 8019 trace_create_file("README", 0444, d_tracer,
5452af66
FW
8020 NULL, &tracing_readme_fops);
8021
69abe6a5
AP
8022 trace_create_file("saved_cmdlines", 0444, d_tracer,
8023 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 8024
939c7a4f
YY
8025 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8026 NULL, &tracing_saved_cmdlines_size_fops);
8027
99c621d7
MS
8028 trace_create_file("saved_tgids", 0444, d_tracer,
8029 NULL, &tracing_saved_tgids_fops);
8030
5f60b351 8031 trace_eval_init();
0c564a53 8032
f57a4143 8033 trace_create_eval_file(d_tracer);
9828413d 8034
3673b8e4
SRRH
8035#ifdef CONFIG_MODULES
8036 register_module_notifier(&trace_module_nb);
8037#endif
8038
bc0c38d1 8039#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
8040 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8041 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 8042#endif
b04cc6b1 8043
277ba044 8044 create_trace_instances(d_tracer);
5452af66 8045
37aea98b 8046 update_tracer_options(&global_trace);
09d23a1d 8047
b5ad384e 8048 return 0;
bc0c38d1
SR
8049}
8050
3f5a54e3
SR
8051static int trace_panic_handler(struct notifier_block *this,
8052 unsigned long event, void *unused)
8053{
944ac425 8054 if (ftrace_dump_on_oops)
cecbca96 8055 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
8056 return NOTIFY_OK;
8057}
8058
8059static struct notifier_block trace_panic_notifier = {
8060 .notifier_call = trace_panic_handler,
8061 .next = NULL,
8062 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8063};
8064
8065static int trace_die_handler(struct notifier_block *self,
8066 unsigned long val,
8067 void *data)
8068{
8069 switch (val) {
8070 case DIE_OOPS:
944ac425 8071 if (ftrace_dump_on_oops)
cecbca96 8072 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
8073 break;
8074 default:
8075 break;
8076 }
8077 return NOTIFY_OK;
8078}
8079
8080static struct notifier_block trace_die_notifier = {
8081 .notifier_call = trace_die_handler,
8082 .priority = 200
8083};
8084
8085/*
8086 * printk is set to max of 1024, we really don't need it that big.
8087 * Nothing should be printing 1000 characters anyway.
8088 */
8089#define TRACE_MAX_PRINT 1000
8090
8091/*
8092 * Define here KERN_TRACE so that we have one place to modify
8093 * it if we decide to change what log level the ftrace dump
8094 * should be at.
8095 */
428aee14 8096#define KERN_TRACE KERN_EMERG
3f5a54e3 8097
955b61e5 8098void
3f5a54e3
SR
8099trace_printk_seq(struct trace_seq *s)
8100{
8101 /* Probably should print a warning here. */
3a161d99
SRRH
8102 if (s->seq.len >= TRACE_MAX_PRINT)
8103 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 8104
820b75f6
SRRH
8105 /*
8106 * More paranoid code. Although the buffer size is set to
8107 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8108 * an extra layer of protection.
8109 */
8110 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8111 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
8112
8113 /* should be zero ended, but we are paranoid. */
3a161d99 8114 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
8115
8116 printk(KERN_TRACE "%s", s->buffer);
8117
f9520750 8118 trace_seq_init(s);
3f5a54e3
SR
8119}
8120
955b61e5
JW
8121void trace_init_global_iter(struct trace_iterator *iter)
8122{
8123 iter->tr = &global_trace;
2b6080f2 8124 iter->trace = iter->tr->current_trace;
ae3b5093 8125 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 8126 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
8127
8128 if (iter->trace && iter->trace->open)
8129 iter->trace->open(iter);
8130
8131 /* Annotate start of buffers if we had overruns */
8132 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8133 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8134
8135 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8136 if (trace_clocks[iter->tr->clock_id].in_ns)
8137 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
8138}
8139
7fe70b57 8140void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 8141{
3f5a54e3
SR
8142 /* use static because iter can be a bit big for the stack */
8143 static struct trace_iterator iter;
7fe70b57 8144 static atomic_t dump_running;
983f938a 8145 struct trace_array *tr = &global_trace;
cf586b61 8146 unsigned int old_userobj;
d769041f
SR
8147 unsigned long flags;
8148 int cnt = 0, cpu;
3f5a54e3 8149
7fe70b57
SRRH
8150 /* Only allow one dump user at a time. */
8151 if (atomic_inc_return(&dump_running) != 1) {
8152 atomic_dec(&dump_running);
8153 return;
8154 }
3f5a54e3 8155
7fe70b57
SRRH
8156 /*
8157 * Always turn off tracing when we dump.
8158 * We don't need to show trace output of what happens
8159 * between multiple crashes.
8160 *
8161 * If the user does a sysrq-z, then they can re-enable
8162 * tracing with echo 1 > tracing_on.
8163 */
0ee6b6cf 8164 tracing_off();
cf586b61 8165
7fe70b57 8166 local_irq_save(flags);
3f5a54e3 8167
38dbe0b1 8168 /* Simulate the iterator */
955b61e5
JW
8169 trace_init_global_iter(&iter);
8170
d769041f 8171 for_each_tracing_cpu(cpu) {
5e2d5ef8 8172 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
d769041f
SR
8173 }
8174
983f938a 8175 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
cf586b61 8176
b54d3de9 8177 /* don't look at user memory in panic mode */
983f938a 8178 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
b54d3de9 8179
cecbca96
FW
8180 switch (oops_dump_mode) {
8181 case DUMP_ALL:
ae3b5093 8182 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
8183 break;
8184 case DUMP_ORIG:
8185 iter.cpu_file = raw_smp_processor_id();
8186 break;
8187 case DUMP_NONE:
8188 goto out_enable;
8189 default:
8190 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 8191 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
8192 }
8193
8194 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 8195
7fe70b57
SRRH
8196 /* Did function tracer already get disabled? */
8197 if (ftrace_is_dead()) {
8198 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8199 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8200 }
8201
3f5a54e3
SR
8202 /*
 8203 * We need to stop all tracing on all CPUs to read
 8204 * the next buffer. This is a bit expensive, but is
 8205 * not done often. We fill in all that we can read,
8206 * and then release the locks again.
8207 */
8208
3f5a54e3
SR
8209 while (!trace_empty(&iter)) {
8210
8211 if (!cnt)
8212 printk(KERN_TRACE "---------------------------------\n");
8213
8214 cnt++;
8215
8216 /* reset all but tr, trace, and overruns */
8217 memset(&iter.seq, 0,
8218 sizeof(struct trace_iterator) -
8219 offsetof(struct trace_iterator, seq));
8220 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8221 iter.pos = -1;
8222
955b61e5 8223 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
8224 int ret;
8225
8226 ret = print_trace_line(&iter);
8227 if (ret != TRACE_TYPE_NO_CONSUME)
8228 trace_consume(&iter);
3f5a54e3 8229 }
b892e5c8 8230 touch_nmi_watchdog();
3f5a54e3
SR
8231
8232 trace_printk_seq(&iter.seq);
8233 }
8234
8235 if (!cnt)
8236 printk(KERN_TRACE " (ftrace buffer empty)\n");
8237 else
8238 printk(KERN_TRACE "---------------------------------\n");
8239
cecbca96 8240 out_enable:
983f938a 8241 tr->trace_flags |= old_userobj;
cf586b61 8242
7fe70b57
SRRH
8243 for_each_tracing_cpu(cpu) {
8244 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 8245 }
7fe70b57 8246 atomic_dec(&dump_running);
cd891ae0 8247 local_irq_restore(flags);
3f5a54e3 8248}
a8eecf22 8249EXPORT_SYMBOL_GPL(ftrace_dump);
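/*
 * ftrace_dump() is normally reached via the panic/die notifiers above
 * (when ftrace_dump_on_oops is set) or via the sysrq-z handler
 * mentioned in the comment inside the function; the oops_dump_mode
 * argument then selects dumping all CPUs or only the current one.
 */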
cf586b61 8250
3928a8a2 8251__init static int tracer_alloc_buffers(void)
bc0c38d1 8252{
73c5162a 8253 int ring_buf_size;
9e01c1b7 8254 int ret = -ENOMEM;
4c11d7ae 8255
b5e87c05
SRRH
8256 /*
 8257 * Make sure we don't accidentally add more trace options
8258 * than we have bits for.
8259 */
9a38a885 8260 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
b5e87c05 8261
9e01c1b7
RR
8262 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8263 goto out;
8264
ccfe9e42 8265 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 8266 goto out_free_buffer_mask;
4c11d7ae 8267
07d777fe
SR
8268 /* Only allocate trace_printk buffers if a trace_printk exists */
8269 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 8270 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
8271 trace_printk_init_buffers();
8272
73c5162a
SR
8273 /* To save memory, keep the ring buffer size to its minimum */
8274 if (ring_buffer_expanded)
8275 ring_buf_size = trace_buf_size;
8276 else
8277 ring_buf_size = 1;
8278
9e01c1b7 8279 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 8280 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 8281
2b6080f2
SR
8282 raw_spin_lock_init(&global_trace.start_lock);
8283
b32614c0
SAS
8284 /*
 8285 * The prepare callback allocates some memory for the ring buffer. We
 8286 * don't free the buffer if the CPU goes down. If we were to free
8287 * the buffer, then the user would lose any trace that was in the
8288 * buffer. The memory will be removed once the "instance" is removed.
8289 */
8290 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8291 "trace/RB:preapre", trace_rb_cpu_prepare,
8292 NULL);
8293 if (ret < 0)
8294 goto out_free_cpumask;
2c4a33ab
SRRH
8295 /* Used for event triggers */
8296 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8297 if (!temp_buffer)
b32614c0 8298 goto out_rm_hp_state;
2c4a33ab 8299
939c7a4f
YY
8300 if (trace_create_savedcmd() < 0)
8301 goto out_free_temp_buffer;
8302
9e01c1b7 8303 /* TODO: make the number of buffers hot pluggable with CPUS */
737223fb 8304 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
8305 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8306 WARN_ON(1);
939c7a4f 8307 goto out_free_savedcmd;
4c11d7ae 8308 }
a7603ff4 8309
499e5470
SR
8310 if (global_trace.buffer_disabled)
8311 tracing_off();
4c11d7ae 8312
e1e232ca
SR
8313 if (trace_boot_clock) {
8314 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8315 if (ret < 0)
a395d6a7
JP
8316 pr_warn("Trace clock %s not defined, going back to default\n",
8317 trace_boot_clock);
e1e232ca
SR
8318 }
8319
ca164318
SRRH
8320 /*
8321 * register_tracer() might reference current_trace, so it
8322 * needs to be set before we register anything. This is
8323 * just a bootstrap of current_trace anyway.
8324 */
2b6080f2
SR
8325 global_trace.current_trace = &nop_trace;
8326
0b9b12c1
SRRH
8327 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8328
4104d326
SRRH
8329 ftrace_init_global_array_ops(&global_trace);
8330
9a38a885
SRRH
8331 init_trace_flags_index(&global_trace);
8332
ca164318
SRRH
8333 register_tracer(&nop_trace);
8334
dbeafd0d
SRV
8335 /* Function tracing may start here (via kernel command line) */
8336 init_function_trace();
8337
60a11774
SR
8338 /* All seems OK, enable tracing */
8339 tracing_disabled = 0;
3928a8a2 8340
3f5a54e3
SR
8341 atomic_notifier_chain_register(&panic_notifier_list,
8342 &trace_panic_notifier);
8343
8344 register_die_notifier(&trace_die_notifier);
2fc1dfbe 8345
ae63b31e
SR
8346 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8347
8348 INIT_LIST_HEAD(&global_trace.systems);
8349 INIT_LIST_HEAD(&global_trace.events);
8350 list_add(&global_trace.list, &ftrace_trace_arrays);
8351
a4d1e688 8352 apply_trace_boot_options();
7bcfaf54 8353
77fd5c15
SRRH
8354 register_snapshot_cmd();
8355
2fc1dfbe 8356 return 0;
3f5a54e3 8357
939c7a4f
YY
8358out_free_savedcmd:
8359 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
8360out_free_temp_buffer:
8361 ring_buffer_free(temp_buffer);
b32614c0
SAS
8362out_rm_hp_state:
8363 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9e01c1b7 8364out_free_cpumask:
ccfe9e42 8365 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
8366out_free_buffer_mask:
8367 free_cpumask_var(tracing_buffer_mask);
8368out:
8369 return ret;
bc0c38d1 8370}
b2821ae6 8371
e725c731 8372void __init early_trace_init(void)
5f893b26 8373{
0daa2302
SRRH
8374 if (tracepoint_printk) {
8375 tracepoint_print_iter =
8376 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8377 if (WARN_ON(!tracepoint_print_iter))
8378 tracepoint_printk = 0;
42391745
SRRH
8379 else
8380 static_key_enable(&tracepoint_printk_key.key);
0daa2302 8381 }
5f893b26 8382 tracer_alloc_buffers();
e725c731
SRV
8383}
8384
8385void __init trace_init(void)
8386{
0c564a53 8387 trace_event_init();
5f893b26
SRRH
8388}
8389
b2821ae6
SR
8390__init static int clear_boot_tracer(void)
8391{
8392 /*
 8393 * The name of the default bootup tracer is stored in an init
 8394 * section. This function is called at lateinit time; if the boot
 8395 * tracer has still not been registered by then, clear the name
 8396 * out, to prevent a later registration from accessing the buffer
 8397 * that is about to be freed.
8398 */
8399 if (!default_bootup_tracer)
8400 return 0;
8401
8402 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8403 default_bootup_tracer);
8404 default_bootup_tracer = NULL;
8405
8406 return 0;
8407}
8408
8434dc93 8409fs_initcall(tracer_init_tracefs);
b2821ae6 8410late_initcall(clear_boot_tracer);