/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will peek into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump() is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

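/*
 * Illustrative usage (not part of the code): on a running system the
 * dump mode can be selected with
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 * or at boot with "ftrace_dump_on_oops" (all CPUs) or
 * "ftrace_dump_on_oops=orig_cpu" (only the oopsing CPU), matching the
 * parser in set_ftrace_dump_on_oops() below.
 */
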
de7edd31
SRRH
125/* When set, tracing will stop when a WARN*() is hit */
126int __disable_trace_on_warning;
127
9828413d
SRRH
128#ifdef CONFIG_TRACE_ENUM_MAP_FILE
129/* Map of enums to their values, for "enum_map" file */
130struct trace_enum_map_head {
131 struct module *mod;
132 unsigned long length;
133};
134
135union trace_enum_map_item;
136
137struct trace_enum_map_tail {
138 /*
139 * "end" is first and points to NULL as it must be different
140 * than "mod" or "enum_string"
141 */
142 union trace_enum_map_item *next;
143 const char *end; /* points to NULL */
144};
145
146static DEFINE_MUTEX(trace_enum_mutex);
147
148/*
149 * The trace_enum_maps are saved in an array with two extra elements,
150 * one at the beginning, and one at the end. The beginning item contains
151 * the count of the saved maps (head.length), and the module they
152 * belong to if not built in (head.mod). The ending item contains a
153 * pointer to the next array of saved enum_map items.
154 */
155union trace_enum_map_item {
156 struct trace_enum_map map;
157 struct trace_enum_map_head head;
158 struct trace_enum_map_tail tail;
159};
160
161static union trace_enum_map_item *trace_enum_maps;
162#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
163
607e2ea1 164static int tracing_set_tracer(struct trace_array *tr, const char *buf);
b2821ae6 165
ee6c2c1b
LZ
166#define MAX_TRACER_SIZE 100
167static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
b2821ae6 168static char *default_bootup_tracer;
d9e54076 169
55034cd6
SRRH
170static bool allocate_snapshot;
171
1beee96b 172static int __init set_cmdline_ftrace(char *str)
d9e54076 173{
67012ab1 174 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
b2821ae6 175 default_bootup_tracer = bootup_tracer_buf;
73c5162a 176 /* We are using ftrace early, expand it */
55034cd6 177 ring_buffer_expanded = true;
d9e54076
PZ
178 return 1;
179}
1beee96b 180__setup("ftrace=", set_cmdline_ftrace);
d9e54076 181
944ac425
SR
182static int __init set_ftrace_dump_on_oops(char *str)
183{
cecbca96
FW
184 if (*str++ != '=' || !*str) {
185 ftrace_dump_on_oops = DUMP_ALL;
186 return 1;
187 }
188
189 if (!strcmp("orig_cpu", str)) {
190 ftrace_dump_on_oops = DUMP_ORIG;
191 return 1;
192 }
193
194 return 0;
944ac425
SR
195}
196__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
60a11774 197
de7edd31
SRRH
198static int __init stop_trace_on_warning(char *str)
199{
933ff9f2
LCG
200 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
201 __disable_trace_on_warning = 1;
de7edd31
SRRH
202 return 1;
203}
933ff9f2 204__setup("traceoff_on_warning", stop_trace_on_warning);
de7edd31 205
3209cff4 206static int __init boot_alloc_snapshot(char *str)
55034cd6
SRRH
207{
208 allocate_snapshot = true;
209 /* We also need the main ring buffer expanded */
210 ring_buffer_expanded = true;
211 return 1;
212}
3209cff4 213__setup("alloc_snapshot", boot_alloc_snapshot);
55034cd6 214
7bcfaf54
SR
215
216static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
217static char *trace_boot_options __initdata;
218
219static int __init set_trace_boot_options(char *str)
220{
67012ab1 221 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
7bcfaf54
SR
222 trace_boot_options = trace_boot_options_buf;
223 return 0;
224}
225__setup("trace_options=", set_trace_boot_options);
226
e1e232ca
SR
227static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
228static char *trace_boot_clock __initdata;
229
230static int __init set_trace_boot_clock(char *str)
231{
232 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
233 trace_boot_clock = trace_boot_clock_buf;
234 return 0;
235}
236__setup("trace_clock=", set_trace_boot_clock);
237
0daa2302
SRRH
238static int __init set_tracepoint_printk(char *str)
239{
240 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
241 tracepoint_printk = 1;
242 return 1;
243}
244__setup("tp_printk", set_tracepoint_printk);
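
/*
 * Taken together, the __setup() handlers above mean the kernel can be
 * booted with something like (illustrative values only):
 *	ftrace=<tracer> trace_options=<opts> trace_clock=<clock> \
 *	alloc_snapshot traceoff_on_warning ftrace_dump_on_oops tp_printk
 * Each of these strings is parsed here early in boot.
 */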
de7edd31 245
cf8e3474 246unsigned long long ns2usecs(cycle_t nsec)
bc0c38d1
SR
247{
248 nsec += 500;
249 do_div(nsec, 1000);
250 return nsec;
251}
252
/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of each of those pages is used to hold the
 * linked list, by linking the lru item in the page descriptor
 * to each of the pages in that CPU's buffer.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;
266
ae63b31e 267LIST_HEAD(ftrace_trace_arrays);
bc0c38d1 268
ff451961
SRRH
269int trace_array_get(struct trace_array *this_tr)
270{
271 struct trace_array *tr;
272 int ret = -ENODEV;
273
274 mutex_lock(&trace_types_lock);
275 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
276 if (tr == this_tr) {
277 tr->ref++;
278 ret = 0;
279 break;
280 }
281 }
282 mutex_unlock(&trace_types_lock);
283
284 return ret;
285}
286
287static void __trace_array_put(struct trace_array *this_tr)
288{
289 WARN_ON(!this_tr->ref);
290 this_tr->ref--;
291}
292
293void trace_array_put(struct trace_array *this_tr)
294{
295 mutex_lock(&trace_types_lock);
296 __trace_array_put(this_tr);
297 mutex_unlock(&trace_types_lock);
298}
299
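/*
 * Note: trace_array_get() and trace_array_put() are meant to be used in
 * pairs; the get only succeeds while the trace_array is still on
 * ftrace_trace_arrays, and the put must be issued once the caller is
 * done referencing it.
 */
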
7f1d2f82 300int filter_check_discard(struct trace_event_file *file, void *rec,
f306cc82
TZ
301 struct ring_buffer *buffer,
302 struct ring_buffer_event *event)
eb02ce01 303{
5d6ad960 304 if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
f306cc82
TZ
305 !filter_match_preds(file->filter, rec)) {
306 ring_buffer_discard_commit(buffer, event);
307 return 1;
308 }
309
310 return 0;
311}
312EXPORT_SYMBOL_GPL(filter_check_discard);
313
2425bcb9 314int call_filter_check_discard(struct trace_event_call *call, void *rec,
f306cc82
TZ
315 struct ring_buffer *buffer,
316 struct ring_buffer_event *event)
317{
318 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
319 !filter_match_preds(call->filter, rec)) {
320 ring_buffer_discard_commit(buffer, event);
321 return 1;
322 }
323
324 return 0;
eb02ce01 325}
f306cc82 326EXPORT_SYMBOL_GPL(call_filter_check_discard);
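
/*
 * Typical call pattern (see trace_function() further down): reserve and
 * fill an event, then only commit it when the check above does not
 * discard it, e.g.
 *	if (!call_filter_check_discard(call, entry, buffer, event))
 *		__buffer_unlock_commit(buffer, event);
 */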
eb02ce01 327
ad1438a0 328static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
37886f6a
SR
329{
330 u64 ts;
331
332 /* Early boot up does not have a buffer yet */
9457158b 333 if (!buf->buffer)
37886f6a
SR
334 return trace_clock_local();
335
9457158b
AL
336 ts = ring_buffer_time_stamp(buf->buffer, cpu);
337 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
37886f6a
SR
338
339 return ts;
340}
bc0c38d1 341
9457158b
AL
342cycle_t ftrace_now(int cpu)
343{
344 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
345}
346
10246fa3
SRRH
347/**
348 * tracing_is_enabled - Show if global_trace has been disabled
349 *
350 * Shows if the global trace has been enabled or not. It uses the
351 * mirror flag "buffer_disabled" to be used in fast paths such as for
352 * the irqsoff tracer. But it may be inaccurate due to races. If you
353 * need to know the accurate state, use tracing_is_on() which is a little
354 * slower, but accurate.
355 */
9036990d
SR
356int tracing_is_enabled(void)
357{
10246fa3
SRRH
358 /*
359 * For quick access (irqsoff uses this in fast path), just
360 * return the mirror variable of the state of the ring buffer.
361 * It's a little racy, but we don't really care.
362 */
363 smp_rmb();
364 return !global_trace.buffer_disabled;
9036990d
SR
365}
366
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * up to the page size.
 *
 * This number is purposely set to a low value of 16384.
 * If a dump on oops happens, it is much appreciated not to
 * have to wait for all that output. In any case, this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
3f5a54e3 378
3928a8a2 379static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
bc0c38d1 380
4fcdae83 381/* trace_types holds a link list of available tracers. */
bc0c38d1 382static struct tracer *trace_types __read_mostly;
4fcdae83 383
4fcdae83
SR
384/*
385 * trace_types_lock is used to protect the trace_types list.
4fcdae83 386 */
a8227415 387DEFINE_MUTEX(trace_types_lock);
4fcdae83 388
/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() ..etc)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

411#ifdef CONFIG_SMP
412static DECLARE_RWSEM(all_cpu_access_lock);
413static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
414
415static inline void trace_access_lock(int cpu)
416{
ae3b5093 417 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
418 /* gain it for accessing the whole ring buffer. */
419 down_write(&all_cpu_access_lock);
420 } else {
421 /* gain it for accessing a cpu ring buffer. */
422
ae3b5093 423 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
7e53bd42
LJ
424 down_read(&all_cpu_access_lock);
425
426 /* Secondly block other access to this @cpu ring buffer. */
427 mutex_lock(&per_cpu(cpu_access_lock, cpu));
428 }
429}
430
431static inline void trace_access_unlock(int cpu)
432{
ae3b5093 433 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
434 up_write(&all_cpu_access_lock);
435 } else {
436 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
437 up_read(&all_cpu_access_lock);
438 }
439}
440
441static inline void trace_access_lock_init(void)
442{
443 int cpu;
444
445 for_each_possible_cpu(cpu)
446 mutex_init(&per_cpu(cpu_access_lock, cpu));
447}
448
449#else
450
451static DEFINE_MUTEX(access_lock);
452
453static inline void trace_access_lock(int cpu)
454{
455 (void)cpu;
456 mutex_lock(&access_lock);
457}
458
459static inline void trace_access_unlock(int cpu)
460{
461 (void)cpu;
462 mutex_unlock(&access_lock);
463}
464
465static inline void trace_access_lock_init(void)
466{
467}
468
469#endif
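/*
 * Usage sketch for the helpers above: a reader of a single CPU buffer
 * brackets its accesses with trace_access_lock(cpu) and
 * trace_access_unlock(cpu), while a reader that touches every buffer
 * passes RING_BUFFER_ALL_CPUS to take the access lock exclusively.
 */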
470
d78a4614
SRRH
471#ifdef CONFIG_STACKTRACE
472static void __ftrace_trace_stack(struct ring_buffer *buffer,
473 unsigned long flags,
474 int skip, int pc, struct pt_regs *regs);
73dddbb5
SRRH
475static inline void ftrace_trace_stack(struct ring_buffer *buffer,
476 unsigned long flags,
477 int skip, int pc, struct pt_regs *regs);
ca475e83 478
d78a4614
SRRH
479#else
480static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
481 unsigned long flags,
482 int skip, int pc, struct pt_regs *regs)
483{
484}
ca475e83 485static inline void ftrace_trace_stack(struct ring_buffer *buffer,
73dddbb5
SRRH
486 unsigned long flags,
487 int skip, int pc, struct pt_regs *regs)
ca475e83
SRRH
488{
489}
490
d78a4614
SRRH
491#endif
492
ee6bce52 493/* trace_flags holds trace_options default values */
729358da 494unsigned long trace_flags =
55577204 495 FUNCTION_DEFAULT_FLAGS |
729358da
SRRH
496 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
497 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |
498 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
8179e8a1 499 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS
729358da 500 ;
e7e2ee89 501
5280bcef 502static void tracer_tracing_on(struct trace_array *tr)
10246fa3
SRRH
503{
504 if (tr->trace_buffer.buffer)
505 ring_buffer_record_on(tr->trace_buffer.buffer);
506 /*
507 * This flag is looked at when buffers haven't been allocated
508 * yet, or by some tracers (like irqsoff), that just want to
509 * know if the ring buffer has been disabled, but it can handle
510 * races of where it gets disabled but we still do a record.
511 * As the check is in the fast path of the tracers, it is more
512 * important to be fast than accurate.
513 */
514 tr->buffer_disabled = 0;
515 /* Make the flag seen by readers */
516 smp_wmb();
517}
518
499e5470
SR
519/**
520 * tracing_on - enable tracing buffers
521 *
522 * This function enables tracing buffers that may have been
523 * disabled with tracing_off.
524 */
525void tracing_on(void)
526{
10246fa3 527 tracer_tracing_on(&global_trace);
499e5470
SR
528}
529EXPORT_SYMBOL_GPL(tracing_on);
530
09ae7234
SRRH
531/**
532 * __trace_puts - write a constant string into the trace buffer.
533 * @ip: The address of the caller
534 * @str: The constant string to write
535 * @size: The size of the string.
536 */
537int __trace_puts(unsigned long ip, const char *str, int size)
538{
539 struct ring_buffer_event *event;
540 struct ring_buffer *buffer;
541 struct print_entry *entry;
542 unsigned long irq_flags;
543 int alloc;
8abfb872
J
544 int pc;
545
f0160a5a
J
546 if (!(trace_flags & TRACE_ITER_PRINTK))
547 return 0;
548
8abfb872 549 pc = preempt_count();
09ae7234 550
3132e107
SRRH
551 if (unlikely(tracing_selftest_running || tracing_disabled))
552 return 0;
553
09ae7234
SRRH
554 alloc = sizeof(*entry) + size + 2; /* possible \n added */
555
556 local_save_flags(irq_flags);
557 buffer = global_trace.trace_buffer.buffer;
558 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
8abfb872 559 irq_flags, pc);
09ae7234
SRRH
560 if (!event)
561 return 0;
562
563 entry = ring_buffer_event_data(event);
564 entry->ip = ip;
565
566 memcpy(&entry->buf, str, size);
567
568 /* Add a newline if necessary */
569 if (entry->buf[size - 1] != '\n') {
570 entry->buf[size] = '\n';
571 entry->buf[size + 1] = '\0';
572 } else
573 entry->buf[size] = '\0';
574
575 __buffer_unlock_commit(buffer, event);
73dddbb5 576 ftrace_trace_stack(buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
577
578 return size;
579}
580EXPORT_SYMBOL_GPL(__trace_puts);
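
/*
 * Callers normally do not use __trace_puts() directly; it is the path
 * behind the trace_puts() helper macro (declared alongside
 * trace_printk()), which picks between __trace_puts() and
 * __trace_bputs() depending on whether the string is a built-in
 * constant.
 */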
581
582/**
583 * __trace_bputs - write the pointer to a constant string into trace buffer
584 * @ip: The address of the caller
585 * @str: The constant string to write to the buffer to
586 */
587int __trace_bputs(unsigned long ip, const char *str)
588{
589 struct ring_buffer_event *event;
590 struct ring_buffer *buffer;
591 struct bputs_entry *entry;
592 unsigned long irq_flags;
593 int size = sizeof(struct bputs_entry);
8abfb872
J
594 int pc;
595
f0160a5a
J
596 if (!(trace_flags & TRACE_ITER_PRINTK))
597 return 0;
598
8abfb872 599 pc = preempt_count();
09ae7234 600
3132e107
SRRH
601 if (unlikely(tracing_selftest_running || tracing_disabled))
602 return 0;
603
09ae7234
SRRH
604 local_save_flags(irq_flags);
605 buffer = global_trace.trace_buffer.buffer;
606 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
8abfb872 607 irq_flags, pc);
09ae7234
SRRH
608 if (!event)
609 return 0;
610
611 entry = ring_buffer_event_data(event);
612 entry->ip = ip;
613 entry->str = str;
614
615 __buffer_unlock_commit(buffer, event);
73dddbb5 616 ftrace_trace_stack(buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
617
618 return 1;
619}
620EXPORT_SYMBOL_GPL(__trace_bputs);
621
ad909e21
SRRH
622#ifdef CONFIG_TRACER_SNAPSHOT
623/**
624 * trace_snapshot - take a snapshot of the current buffer.
625 *
626 * This causes a swap between the snapshot buffer and the current live
627 * tracing buffer. You can use this to take snapshots of the live
628 * trace when some condition is triggered, but continue to trace.
629 *
630 * Note, make sure to allocate the snapshot with either
631 * a tracing_snapshot_alloc(), or by doing it manually
632 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
633 *
634 * If the snapshot buffer is not allocated, it will stop tracing.
635 * Basically making a permanent snapshot.
636 */
637void tracing_snapshot(void)
638{
639 struct trace_array *tr = &global_trace;
640 struct tracer *tracer = tr->current_trace;
641 unsigned long flags;
642
1b22e382
SRRH
643 if (in_nmi()) {
644 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
645 internal_trace_puts("*** snapshot is being ignored ***\n");
646 return;
647 }
648
ad909e21 649 if (!tr->allocated_snapshot) {
ca268da6
SRRH
650 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
651 internal_trace_puts("*** stopping trace here! ***\n");
ad909e21
SRRH
652 tracing_off();
653 return;
654 }
655
656 /* Note, snapshot can not be used when the tracer uses it */
657 if (tracer->use_max_tr) {
ca268da6
SRRH
658 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
659 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
ad909e21
SRRH
660 return;
661 }
662
663 local_irq_save(flags);
664 update_max_tr(tr, current, smp_processor_id());
665 local_irq_restore(flags);
666}
1b22e382 667EXPORT_SYMBOL_GPL(tracing_snapshot);
ad909e21
SRRH
668
669static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
670 struct trace_buffer *size_buf, int cpu_id);
3209cff4
SRRH
671static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
672
673static int alloc_snapshot(struct trace_array *tr)
674{
675 int ret;
676
677 if (!tr->allocated_snapshot) {
678
679 /* allocate spare buffer */
680 ret = resize_buffer_duplicate_size(&tr->max_buffer,
681 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
682 if (ret < 0)
683 return ret;
684
685 tr->allocated_snapshot = true;
686 }
687
688 return 0;
689}
690
ad1438a0 691static void free_snapshot(struct trace_array *tr)
3209cff4
SRRH
692{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
698 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
699 set_buffer_entries(&tr->max_buffer, 1);
700 tracing_reset_online_cpus(&tr->max_buffer);
701 tr->allocated_snapshot = false;
702}
ad909e21 703
93e31ffb
TZ
704/**
705 * tracing_alloc_snapshot - allocate snapshot buffer.
706 *
707 * This only allocates the snapshot buffer if it isn't already
708 * allocated - it doesn't also take a snapshot.
709 *
710 * This is meant to be used in cases where the snapshot buffer needs
711 * to be set up for events that can't sleep but need to be able to
712 * trigger a snapshot.
713 */
714int tracing_alloc_snapshot(void)
715{
716 struct trace_array *tr = &global_trace;
717 int ret;
718
719 ret = alloc_snapshot(tr);
720 WARN_ON(ret < 0);
721
722 return ret;
723}
724EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
725
ad909e21
SRRH
726/**
727 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
728 *
729 * This is similar to trace_snapshot(), but it will allocate the
730 * snapshot buffer if it isn't already allocated. Use this only
731 * where it is safe to sleep, as the allocation may sleep.
732 *
733 * This causes a swap between the snapshot buffer and the current live
734 * tracing buffer. You can use this to take snapshots of the live
735 * trace when some condition is triggered, but continue to trace.
736 */
737void tracing_snapshot_alloc(void)
738{
ad909e21
SRRH
739 int ret;
740
93e31ffb
TZ
741 ret = tracing_alloc_snapshot();
742 if (ret < 0)
3209cff4 743 return;
ad909e21
SRRH
744
745 tracing_snapshot();
746}
1b22e382 747EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
748#else
749void tracing_snapshot(void)
750{
751 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
752}
1b22e382 753EXPORT_SYMBOL_GPL(tracing_snapshot);
93e31ffb
TZ
754int tracing_alloc_snapshot(void)
755{
756 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
757 return -ENODEV;
758}
759EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
ad909e21
SRRH
760void tracing_snapshot_alloc(void)
761{
762 /* Give warning */
763 tracing_snapshot();
764}
1b22e382 765EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
766#endif /* CONFIG_TRACER_SNAPSHOT */
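
/*
 * Illustrative in-kernel usage of the snapshot API above (sketch only):
 *
 *	// somewhere that may sleep, e.g. setup code
 *	tracing_snapshot_alloc();
 *	...
 *	// later, in the code path being debugged (may be atomic)
 *	if (condition_hit)
 *		tracing_snapshot();
 *
 * Userspace can achieve the same via the tracefs "snapshot" file, as
 * described in the kerneldoc comments above.
 */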
767
5280bcef 768static void tracer_tracing_off(struct trace_array *tr)
10246fa3
SRRH
769{
770 if (tr->trace_buffer.buffer)
771 ring_buffer_record_off(tr->trace_buffer.buffer);
772 /*
773 * This flag is looked at when buffers haven't been allocated
774 * yet, or by some tracers (like irqsoff), that just want to
775 * know if the ring buffer has been disabled, but it can handle
776 * races of where it gets disabled but we still do a record.
777 * As the check is in the fast path of the tracers, it is more
778 * important to be fast than accurate.
779 */
780 tr->buffer_disabled = 1;
781 /* Make the flag seen by readers */
782 smp_wmb();
783}
784
499e5470
SR
785/**
786 * tracing_off - turn off tracing buffers
787 *
788 * This function stops the tracing buffers from recording data.
789 * It does not disable any overhead the tracers themselves may
790 * be causing. This function simply causes all recording to
791 * the ring buffers to fail.
792 */
793void tracing_off(void)
794{
10246fa3 795 tracer_tracing_off(&global_trace);
499e5470
SR
796}
797EXPORT_SYMBOL_GPL(tracing_off);
798
de7edd31
SRRH
799void disable_trace_on_warning(void)
800{
801 if (__disable_trace_on_warning)
802 tracing_off();
803}
804
10246fa3
SRRH
805/**
806 * tracer_tracing_is_on - show real state of ring buffer enabled
807 * @tr : the trace array to know if ring buffer is enabled
808 *
809 * Shows real state of the ring buffer if it is enabled or not.
810 */
5280bcef 811static int tracer_tracing_is_on(struct trace_array *tr)
10246fa3
SRRH
812{
813 if (tr->trace_buffer.buffer)
814 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
815 return !tr->buffer_disabled;
816}
817
499e5470
SR
818/**
819 * tracing_is_on - show state of ring buffers enabled
820 */
821int tracing_is_on(void)
822{
10246fa3 823 return tracer_tracing_is_on(&global_trace);
499e5470
SR
824}
825EXPORT_SYMBOL_GPL(tracing_is_on);
826
3928a8a2 827static int __init set_buf_size(char *str)
bc0c38d1 828{
3928a8a2 829 unsigned long buf_size;
c6caeeb1 830
bc0c38d1
SR
831 if (!str)
832 return 0;
9d612bef 833 buf_size = memparse(str, &str);
c6caeeb1 834 /* nr_entries can not be zero */
9d612bef 835 if (buf_size == 0)
c6caeeb1 836 return 0;
3928a8a2 837 trace_buf_size = buf_size;
bc0c38d1
SR
838 return 1;
839}
3928a8a2 840__setup("trace_buf_size=", set_buf_size);
bc0c38d1 841
0e950173
TB
842static int __init set_tracing_thresh(char *str)
843{
87abb3b1 844 unsigned long threshold;
0e950173
TB
845 int ret;
846
847 if (!str)
848 return 0;
bcd83ea6 849 ret = kstrtoul(str, 0, &threshold);
0e950173
TB
850 if (ret < 0)
851 return 0;
87abb3b1 852 tracing_thresh = threshold * 1000;
0e950173
TB
853 return 1;
854}
855__setup("tracing_thresh=", set_tracing_thresh);
856
57f50be1
SR
857unsigned long nsecs_to_usecs(unsigned long nsecs)
858{
859 return nsecs / 1000;
860}
861
a3418a36
SRRH
862/*
863 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
864 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
865 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
866 * of strings in the order that the enums were defined.
867 */
868#undef C
869#define C(a, b) b
870
/* These must match the bit positions in trace_iterator_flags */
bc0c38d1 872static const char *trace_options[] = {
a3418a36 873 TRACE_FLAGS
bc0c38d1
SR
874 NULL
875};
876
5079f326
Z
877static struct {
878 u64 (*func)(void);
879 const char *name;
8be0709f 880 int in_ns; /* is this clock in nanoseconds? */
5079f326 881} trace_clocks[] = {
1b3e5c09
TG
882 { trace_clock_local, "local", 1 },
883 { trace_clock_global, "global", 1 },
884 { trace_clock_counter, "counter", 0 },
e7fda6c4 885 { trace_clock_jiffies, "uptime", 0 },
1b3e5c09
TG
886 { trace_clock, "perf", 1 },
887 { ktime_get_mono_fast_ns, "mono", 1 },
aabfa5f2 888 { ktime_get_raw_fast_ns, "mono_raw", 1 },
8cbd9cc6 889 ARCH_TRACE_CLOCKS
5079f326
Z
890};
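
/*
 * Which of these clocks is used can be chosen by name, either with the
 * "trace_clock=" boot parameter handled above or, once the system is up,
 * through the tracefs "trace_clock" file handled elsewhere in this file.
 */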
891
b63f39ea 892/*
893 * trace_parser_get_init - gets the buffer for trace parser
894 */
895int trace_parser_get_init(struct trace_parser *parser, int size)
896{
897 memset(parser, 0, sizeof(*parser));
898
899 parser->buffer = kmalloc(size, GFP_KERNEL);
900 if (!parser->buffer)
901 return 1;
902
903 parser->size = size;
904 return 0;
905}
906
907/*
908 * trace_parser_put - frees the buffer for trace parser
909 */
910void trace_parser_put(struct trace_parser *parser)
911{
912 kfree(parser->buffer);
913}
914
915/*
916 * trace_get_user - reads the user input string separated by space
917 * (matched by isspace(ch))
918 *
919 * For each string found the 'struct trace_parser' is updated,
920 * and the function returns.
921 *
922 * Returns number of bytes read.
923 *
924 * See kernel/trace/trace.h for 'struct trace_parser' details.
925 */
926int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
927 size_t cnt, loff_t *ppos)
928{
929 char ch;
930 size_t read = 0;
931 ssize_t ret;
932
933 if (!*ppos)
934 trace_parser_clear(parser);
935
936 ret = get_user(ch, ubuf++);
937 if (ret)
938 goto out;
939
940 read++;
941 cnt--;
942
943 /*
944 * The parser is not finished with the last write,
945 * continue reading the user input without skipping spaces.
946 */
947 if (!parser->cont) {
948 /* skip white space */
949 while (cnt && isspace(ch)) {
950 ret = get_user(ch, ubuf++);
951 if (ret)
952 goto out;
953 read++;
954 cnt--;
955 }
956
957 /* only spaces were written */
958 if (isspace(ch)) {
959 *ppos += read;
960 ret = read;
961 goto out;
962 }
963
964 parser->idx = 0;
965 }
966
967 /* read the non-space input */
968 while (cnt && !isspace(ch)) {
3c235a33 969 if (parser->idx < parser->size - 1)
b63f39ea 970 parser->buffer[parser->idx++] = ch;
971 else {
972 ret = -EINVAL;
973 goto out;
974 }
975 ret = get_user(ch, ubuf++);
976 if (ret)
977 goto out;
978 read++;
979 cnt--;
980 }
981
982 /* We either got finished input or we have to wait for another call. */
983 if (isspace(ch)) {
984 parser->buffer[parser->idx] = 0;
985 parser->cont = false;
057db848 986 } else if (parser->idx < parser->size - 1) {
b63f39ea 987 parser->cont = true;
988 parser->buffer[parser->idx++] = ch;
057db848
SR
989 } else {
990 ret = -EINVAL;
991 goto out;
b63f39ea 992 }
993
994 *ppos += read;
995 ret = read;
996
997out:
998 return ret;
999}
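
/*
 * A typical caller of trace_get_user() (for example a tracefs write
 * handler) calls it in a loop, handling one space-separated token from
 * parser.buffer per successful return, until the returned byte counts
 * cover the whole user buffer or a negative error comes back. The
 * "cont" flag signals that a token was split across two writes.
 */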
1000
3a161d99 1001/* TODO add a seq_buf_to_buffer() */
b8b94265 1002static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
3c56819b
EGM
1003{
1004 int len;
3c56819b 1005
5ac48378 1006 if (trace_seq_used(s) <= s->seq.readpos)
3c56819b
EGM
1007 return -EBUSY;
1008
5ac48378 1009 len = trace_seq_used(s) - s->seq.readpos;
3c56819b
EGM
1010 if (cnt > len)
1011 cnt = len;
3a161d99 1012 memcpy(buf, s->buffer + s->seq.readpos, cnt);
3c56819b 1013
3a161d99 1014 s->seq.readpos += cnt;
3c56819b
EGM
1015 return cnt;
1016}
1017
0e950173
TB
1018unsigned long __read_mostly tracing_thresh;
1019
5d4a9dba 1020#ifdef CONFIG_TRACER_MAX_TRACE
5d4a9dba
SR
1021/*
1022 * Copy the new maximum trace into the separate maximum-trace
1023 * structure. (this way the maximum trace is permanently saved,
1024 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1025 */
1026static void
1027__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1028{
12883efb
SRRH
1029 struct trace_buffer *trace_buf = &tr->trace_buffer;
1030 struct trace_buffer *max_buf = &tr->max_buffer;
1031 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1032 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
5d4a9dba 1033
12883efb
SRRH
1034 max_buf->cpu = cpu;
1035 max_buf->time_start = data->preempt_timestamp;
5d4a9dba 1036
6d9b3fa5 1037 max_data->saved_latency = tr->max_latency;
8248ac05
SR
1038 max_data->critical_start = data->critical_start;
1039 max_data->critical_end = data->critical_end;
5d4a9dba 1040
1acaa1b2 1041 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
8248ac05 1042 max_data->pid = tsk->pid;
f17a5194
SRRH
1043 /*
1044 * If tsk == current, then use current_uid(), as that does not use
1045 * RCU. The irq tracer can be called out of RCU scope.
1046 */
1047 if (tsk == current)
1048 max_data->uid = current_uid();
1049 else
1050 max_data->uid = task_uid(tsk);
1051
8248ac05
SR
1052 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1053 max_data->policy = tsk->policy;
1054 max_data->rt_priority = tsk->rt_priority;
5d4a9dba
SR
1055
1056 /* record this tasks comm */
1057 tracing_record_cmdline(tsk);
1058}
1059
4fcdae83
SR
1060/**
1061 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1062 * @tr: tracer
1063 * @tsk: the task with the latency
1064 * @cpu: The cpu that initiated the trace.
1065 *
1066 * Flip the buffers between the @tr and the max_tr and record information
1067 * about which task was the cause of this latency.
1068 */
e309b41d 1069void
bc0c38d1
SR
1070update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1071{
2721e72d 1072 struct ring_buffer *buf;
bc0c38d1 1073
2b6080f2 1074 if (tr->stop_count)
b8de7bd1
SR
1075 return;
1076
4c11d7ae 1077 WARN_ON_ONCE(!irqs_disabled());
34600f0e 1078
45ad21ca 1079 if (!tr->allocated_snapshot) {
debdd57f 1080 /* Only the nop tracer should hit this when disabling */
2b6080f2 1081 WARN_ON_ONCE(tr->current_trace != &nop_trace);
34600f0e 1082 return;
debdd57f 1083 }
34600f0e 1084
0b9b12c1 1085 arch_spin_lock(&tr->max_lock);
3928a8a2 1086
12883efb
SRRH
1087 buf = tr->trace_buffer.buffer;
1088 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1089 tr->max_buffer.buffer = buf;
3928a8a2 1090
bc0c38d1 1091 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1092 arch_spin_unlock(&tr->max_lock);
bc0c38d1
SR
1093}
1094
1095/**
1096 * update_max_tr_single - only copy one trace over, and reset the rest
1097 * @tr - tracer
1098 * @tsk - task with the latency
1099 * @cpu - the cpu of the buffer to copy.
4fcdae83
SR
1100 *
1101 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
bc0c38d1 1102 */
e309b41d 1103void
bc0c38d1
SR
1104update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1105{
3928a8a2 1106 int ret;
bc0c38d1 1107
2b6080f2 1108 if (tr->stop_count)
b8de7bd1
SR
1109 return;
1110
4c11d7ae 1111 WARN_ON_ONCE(!irqs_disabled());
6c24499f 1112 if (!tr->allocated_snapshot) {
2930e04d 1113 /* Only the nop tracer should hit this when disabling */
9e8529af 1114 WARN_ON_ONCE(tr->current_trace != &nop_trace);
ef710e10 1115 return;
2930e04d 1116 }
ef710e10 1117
0b9b12c1 1118 arch_spin_lock(&tr->max_lock);
bc0c38d1 1119
12883efb 1120 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
3928a8a2 1121
e8165dbb
SR
1122 if (ret == -EBUSY) {
1123 /*
1124 * We failed to swap the buffer due to a commit taking
1125 * place on this CPU. We fail to record, but we reset
1126 * the max trace buffer (no one writes directly to it)
1127 * and flag that it failed.
1128 */
12883efb 1129 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
e8165dbb
SR
1130 "Failed to swap buffers due to commit in progress\n");
1131 }
1132
e8165dbb 1133 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
bc0c38d1
SR
1134
1135 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1136 arch_spin_unlock(&tr->max_lock);
bc0c38d1 1137}
5d4a9dba 1138#endif /* CONFIG_TRACER_MAX_TRACE */
bc0c38d1 1139
e30f53aa 1140static int wait_on_pipe(struct trace_iterator *iter, bool full)
0d5c6e1c 1141{
15693458
SRRH
1142 /* Iterators are static, they should be filled or empty */
1143 if (trace_buffer_iter(iter, iter->cpu_file))
8b8b3683 1144 return 0;
0d5c6e1c 1145
e30f53aa
RV
1146 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1147 full);
0d5c6e1c
SR
1148}
1149
f4e781c0
SRRH
1150#ifdef CONFIG_FTRACE_STARTUP_TEST
1151static int run_tracer_selftest(struct tracer *type)
1152{
1153 struct trace_array *tr = &global_trace;
1154 struct tracer *saved_tracer = tr->current_trace;
1155 int ret;
0d5c6e1c 1156
f4e781c0
SRRH
1157 if (!type->selftest || tracing_selftest_disabled)
1158 return 0;
0d5c6e1c
SR
1159
1160 /*
f4e781c0
SRRH
1161 * Run a selftest on this tracer.
1162 * Here we reset the trace buffer, and set the current
1163 * tracer to be this tracer. The tracer can then run some
1164 * internal tracing to verify that everything is in order.
1165 * If we fail, we do not register this tracer.
0d5c6e1c 1166 */
f4e781c0 1167 tracing_reset_online_cpus(&tr->trace_buffer);
0d5c6e1c 1168
f4e781c0
SRRH
1169 tr->current_trace = type;
1170
1171#ifdef CONFIG_TRACER_MAX_TRACE
1172 if (type->use_max_tr) {
1173 /* If we expanded the buffers, make sure the max is expanded too */
1174 if (ring_buffer_expanded)
1175 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1176 RING_BUFFER_ALL_CPUS);
1177 tr->allocated_snapshot = true;
1178 }
1179#endif
1180
1181 /* the test is responsible for initializing and enabling */
1182 pr_info("Testing tracer %s: ", type->name);
1183 ret = type->selftest(type, tr);
1184 /* the test is responsible for resetting too */
1185 tr->current_trace = saved_tracer;
1186 if (ret) {
1187 printk(KERN_CONT "FAILED!\n");
1188 /* Add the warning after printing 'FAILED' */
1189 WARN_ON(1);
1190 return -1;
1191 }
1192 /* Only reset on passing, to avoid touching corrupted buffers */
1193 tracing_reset_online_cpus(&tr->trace_buffer);
1194
1195#ifdef CONFIG_TRACER_MAX_TRACE
1196 if (type->use_max_tr) {
1197 tr->allocated_snapshot = false;
0d5c6e1c 1198
f4e781c0
SRRH
1199 /* Shrink the max buffer again */
1200 if (ring_buffer_expanded)
1201 ring_buffer_resize(tr->max_buffer.buffer, 1,
1202 RING_BUFFER_ALL_CPUS);
1203 }
1204#endif
1205
1206 printk(KERN_CONT "PASSED\n");
1207 return 0;
1208}
1209#else
1210static inline int run_tracer_selftest(struct tracer *type)
1211{
1212 return 0;
0d5c6e1c 1213}
f4e781c0 1214#endif /* CONFIG_FTRACE_STARTUP_TEST */
0d5c6e1c 1215
41d9c0be
SRRH
1216static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1217
4fcdae83
SR
1218/**
1219 * register_tracer - register a tracer with the ftrace system.
1220 * @type - the plugin for the tracer
1221 *
1222 * Register a new plugin tracer.
1223 */
bc0c38d1
SR
1224int register_tracer(struct tracer *type)
1225{
1226 struct tracer *t;
bc0c38d1
SR
1227 int ret = 0;
1228
1229 if (!type->name) {
1230 pr_info("Tracer must have a name\n");
1231 return -1;
1232 }
1233
24a461d5 1234 if (strlen(type->name) >= MAX_TRACER_SIZE) {
ee6c2c1b
LZ
1235 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1236 return -1;
1237 }
1238
bc0c38d1 1239 mutex_lock(&trace_types_lock);
86fa2f60 1240
8e1b82e0
FW
1241 tracing_selftest_running = true;
1242
bc0c38d1
SR
1243 for (t = trace_types; t; t = t->next) {
1244 if (strcmp(type->name, t->name) == 0) {
1245 /* already found */
ee6c2c1b 1246 pr_info("Tracer %s already registered\n",
bc0c38d1
SR
1247 type->name);
1248 ret = -1;
1249 goto out;
1250 }
1251 }
1252
adf9f195
FW
1253 if (!type->set_flag)
1254 type->set_flag = &dummy_set_flag;
1255 if (!type->flags)
1256 type->flags = &dummy_tracer_flags;
1257 else
1258 if (!type->flags->opts)
1259 type->flags->opts = dummy_tracer_opt;
6eaaa5d5 1260
f4e781c0
SRRH
1261 ret = run_tracer_selftest(type);
1262 if (ret < 0)
1263 goto out;
60a11774 1264
bc0c38d1
SR
1265 type->next = trace_types;
1266 trace_types = type;
41d9c0be 1267 add_tracer_options(&global_trace, type);
60a11774 1268
bc0c38d1 1269 out:
8e1b82e0 1270 tracing_selftest_running = false;
bc0c38d1
SR
1271 mutex_unlock(&trace_types_lock);
1272
dac74940
SR
1273 if (ret || !default_bootup_tracer)
1274 goto out_unlock;
1275
ee6c2c1b 1276 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
dac74940
SR
1277 goto out_unlock;
1278
1279 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1280 /* Do we want this tracer to start on bootup? */
607e2ea1 1281 tracing_set_tracer(&global_trace, type->name);
dac74940
SR
1282 default_bootup_tracer = NULL;
1283 /* disable other selftests, since this will break it. */
55034cd6 1284 tracing_selftest_disabled = true;
b2821ae6 1285#ifdef CONFIG_FTRACE_STARTUP_TEST
dac74940
SR
1286 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1287 type->name);
b2821ae6 1288#endif
b2821ae6 1289
dac74940 1290 out_unlock:
bc0c38d1
SR
1291 return ret;
1292}
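
/*
 * A tracer plugin typically does something like the following from its
 * own init code (sketch only; a real tracer fills in more callbacks):
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	return register_tracer(&my_tracer);
 *
 * The name must be unique and shorter than MAX_TRACER_SIZE.
 */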
1293
12883efb 1294void tracing_reset(struct trace_buffer *buf, int cpu)
f633903a 1295{
12883efb 1296 struct ring_buffer *buffer = buf->buffer;
f633903a 1297
a5416411
HT
1298 if (!buffer)
1299 return;
1300
f633903a
SR
1301 ring_buffer_record_disable(buffer);
1302
1303 /* Make sure all commits have finished */
1304 synchronize_sched();
68179686 1305 ring_buffer_reset_cpu(buffer, cpu);
f633903a
SR
1306
1307 ring_buffer_record_enable(buffer);
1308}
1309
12883efb 1310void tracing_reset_online_cpus(struct trace_buffer *buf)
213cc060 1311{
12883efb 1312 struct ring_buffer *buffer = buf->buffer;
213cc060
PE
1313 int cpu;
1314
a5416411
HT
1315 if (!buffer)
1316 return;
1317
621968cd
SR
1318 ring_buffer_record_disable(buffer);
1319
1320 /* Make sure all commits have finished */
1321 synchronize_sched();
1322
9457158b 1323 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
213cc060
PE
1324
1325 for_each_online_cpu(cpu)
68179686 1326 ring_buffer_reset_cpu(buffer, cpu);
621968cd
SR
1327
1328 ring_buffer_record_enable(buffer);
213cc060
PE
1329}
1330
09d8091c 1331/* Must have trace_types_lock held */
873c642f 1332void tracing_reset_all_online_cpus(void)
9456f0fa 1333{
873c642f
SRRH
1334 struct trace_array *tr;
1335
873c642f 1336 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
12883efb
SRRH
1337 tracing_reset_online_cpus(&tr->trace_buffer);
1338#ifdef CONFIG_TRACER_MAX_TRACE
1339 tracing_reset_online_cpus(&tr->max_buffer);
1340#endif
873c642f 1341 }
9456f0fa
SR
1342}
1343
939c7a4f 1344#define SAVED_CMDLINES_DEFAULT 128
2c7eea4c 1345#define NO_CMDLINE_MAP UINT_MAX
edc35bd7 1346static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
939c7a4f
YY
1347struct saved_cmdlines_buffer {
1348 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1349 unsigned *map_cmdline_to_pid;
1350 unsigned cmdline_num;
1351 int cmdline_idx;
1352 char *saved_cmdlines;
1353};
1354static struct saved_cmdlines_buffer *savedcmd;
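
/*
 * savedcmd keeps a small, fixed number of recently seen comms:
 * map_pid_to_cmdline maps a pid to a slot index, map_cmdline_to_pid maps
 * the slot back to the pid that owns it, and saved_cmdlines holds the
 * TASK_COMM_LEN-sized strings themselves. Slots are recycled in a simple
 * round-robin fashion by trace_save_cmdline() below.
 */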
25b0b44a 1355
25b0b44a 1356/* temporary disable recording */
4fd27358 1357static atomic_t trace_record_cmdline_disabled __read_mostly;
bc0c38d1 1358
939c7a4f
YY
1359static inline char *get_saved_cmdlines(int idx)
1360{
1361 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1362}
1363
1364static inline void set_cmdline(int idx, const char *cmdline)
bc0c38d1 1365{
939c7a4f
YY
1366 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1367}
1368
1369static int allocate_cmdlines_buffer(unsigned int val,
1370 struct saved_cmdlines_buffer *s)
1371{
1372 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1373 GFP_KERNEL);
1374 if (!s->map_cmdline_to_pid)
1375 return -ENOMEM;
1376
1377 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1378 if (!s->saved_cmdlines) {
1379 kfree(s->map_cmdline_to_pid);
1380 return -ENOMEM;
1381 }
1382
1383 s->cmdline_idx = 0;
1384 s->cmdline_num = val;
1385 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1386 sizeof(s->map_pid_to_cmdline));
1387 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1388 val * sizeof(*s->map_cmdline_to_pid));
1389
1390 return 0;
1391}
1392
1393static int trace_create_savedcmd(void)
1394{
1395 int ret;
1396
a6af8fbf 1397 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
939c7a4f
YY
1398 if (!savedcmd)
1399 return -ENOMEM;
1400
1401 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1402 if (ret < 0) {
1403 kfree(savedcmd);
1404 savedcmd = NULL;
1405 return -ENOMEM;
1406 }
1407
1408 return 0;
bc0c38d1
SR
1409}
1410
b5130b1e
CE
1411int is_tracing_stopped(void)
1412{
2b6080f2 1413 return global_trace.stop_count;
b5130b1e
CE
1414}
1415
0f048701
SR
1416/**
1417 * tracing_start - quick start of the tracer
1418 *
1419 * If tracing is enabled but was stopped by tracing_stop,
1420 * this will start the tracer back up.
1421 */
1422void tracing_start(void)
1423{
1424 struct ring_buffer *buffer;
1425 unsigned long flags;
1426
1427 if (tracing_disabled)
1428 return;
1429
2b6080f2
SR
1430 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1431 if (--global_trace.stop_count) {
1432 if (global_trace.stop_count < 0) {
b06a8301
SR
1433 /* Someone screwed up their debugging */
1434 WARN_ON_ONCE(1);
2b6080f2 1435 global_trace.stop_count = 0;
b06a8301 1436 }
0f048701
SR
1437 goto out;
1438 }
1439
a2f80714 1440 /* Prevent the buffers from switching */
0b9b12c1 1441 arch_spin_lock(&global_trace.max_lock);
0f048701 1442
12883efb 1443 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1444 if (buffer)
1445 ring_buffer_record_enable(buffer);
1446
12883efb
SRRH
1447#ifdef CONFIG_TRACER_MAX_TRACE
1448 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1449 if (buffer)
1450 ring_buffer_record_enable(buffer);
12883efb 1451#endif
0f048701 1452
0b9b12c1 1453 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1454
0f048701 1455 out:
2b6080f2
SR
1456 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1457}
1458
1459static void tracing_start_tr(struct trace_array *tr)
1460{
1461 struct ring_buffer *buffer;
1462 unsigned long flags;
1463
1464 if (tracing_disabled)
1465 return;
1466
1467 /* If global, we need to also start the max tracer */
1468 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1469 return tracing_start();
1470
1471 raw_spin_lock_irqsave(&tr->start_lock, flags);
1472
1473 if (--tr->stop_count) {
1474 if (tr->stop_count < 0) {
1475 /* Someone screwed up their debugging */
1476 WARN_ON_ONCE(1);
1477 tr->stop_count = 0;
1478 }
1479 goto out;
1480 }
1481
12883efb 1482 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1483 if (buffer)
1484 ring_buffer_record_enable(buffer);
1485
1486 out:
1487 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1488}
1489
1490/**
1491 * tracing_stop - quick stop of the tracer
1492 *
1493 * Light weight way to stop tracing. Use in conjunction with
1494 * tracing_start.
1495 */
1496void tracing_stop(void)
1497{
1498 struct ring_buffer *buffer;
1499 unsigned long flags;
1500
2b6080f2
SR
1501 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1502 if (global_trace.stop_count++)
0f048701
SR
1503 goto out;
1504
a2f80714 1505 /* Prevent the buffers from switching */
0b9b12c1 1506 arch_spin_lock(&global_trace.max_lock);
a2f80714 1507
12883efb 1508 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1509 if (buffer)
1510 ring_buffer_record_disable(buffer);
1511
12883efb
SRRH
1512#ifdef CONFIG_TRACER_MAX_TRACE
1513 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1514 if (buffer)
1515 ring_buffer_record_disable(buffer);
12883efb 1516#endif
0f048701 1517
0b9b12c1 1518 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1519
0f048701 1520 out:
2b6080f2
SR
1521 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1522}
1523
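/*
 * Note that tracing_stop()/tracing_start() nest: stop_count is a counter,
 * so every tracing_stop() must be balanced by a tracing_start() before
 * recording actually resumes.
 */
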
1524static void tracing_stop_tr(struct trace_array *tr)
1525{
1526 struct ring_buffer *buffer;
1527 unsigned long flags;
1528
1529 /* If global, we need to also stop the max tracer */
1530 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1531 return tracing_stop();
1532
1533 raw_spin_lock_irqsave(&tr->start_lock, flags);
1534 if (tr->stop_count++)
1535 goto out;
1536
12883efb 1537 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1538 if (buffer)
1539 ring_buffer_record_disable(buffer);
1540
1541 out:
1542 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1543}
1544
e309b41d 1545void trace_stop_cmdline_recording(void);
bc0c38d1 1546
379cfdac 1547static int trace_save_cmdline(struct task_struct *tsk)
bc0c38d1 1548{
a635cf04 1549 unsigned pid, idx;
bc0c38d1
SR
1550
1551 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
379cfdac 1552 return 0;
bc0c38d1
SR
1553
1554 /*
1555 * It's not the end of the world if we don't get
1556 * the lock, but we also don't want to spin
1557 * nor do we want to disable interrupts,
1558 * so if we miss here, then better luck next time.
1559 */
0199c4e6 1560 if (!arch_spin_trylock(&trace_cmdline_lock))
379cfdac 1561 return 0;
bc0c38d1 1562
939c7a4f 1563 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2c7eea4c 1564 if (idx == NO_CMDLINE_MAP) {
939c7a4f 1565 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
bc0c38d1 1566
a635cf04
CE
1567 /*
1568 * Check whether the cmdline buffer at idx has a pid
1569 * mapped. We are going to overwrite that entry so we
1570 * need to clear the map_pid_to_cmdline. Otherwise we
1571 * would read the new comm for the old pid.
1572 */
939c7a4f 1573 pid = savedcmd->map_cmdline_to_pid[idx];
a635cf04 1574 if (pid != NO_CMDLINE_MAP)
939c7a4f 1575 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
bc0c38d1 1576
939c7a4f
YY
1577 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1578 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
bc0c38d1 1579
939c7a4f 1580 savedcmd->cmdline_idx = idx;
bc0c38d1
SR
1581 }
1582
939c7a4f 1583 set_cmdline(idx, tsk->comm);
bc0c38d1 1584
0199c4e6 1585 arch_spin_unlock(&trace_cmdline_lock);
379cfdac
SRRH
1586
1587 return 1;
bc0c38d1
SR
1588}
1589
4c27e756 1590static void __trace_find_cmdline(int pid, char comm[])
bc0c38d1 1591{
bc0c38d1
SR
1592 unsigned map;
1593
4ca53085
SR
1594 if (!pid) {
1595 strcpy(comm, "<idle>");
1596 return;
1597 }
bc0c38d1 1598
74bf4076
SR
1599 if (WARN_ON_ONCE(pid < 0)) {
1600 strcpy(comm, "<XXX>");
1601 return;
1602 }
1603
4ca53085
SR
1604 if (pid > PID_MAX_DEFAULT) {
1605 strcpy(comm, "<...>");
1606 return;
1607 }
bc0c38d1 1608
939c7a4f 1609 map = savedcmd->map_pid_to_cmdline[pid];
50d88758 1610 if (map != NO_CMDLINE_MAP)
939c7a4f 1611 strcpy(comm, get_saved_cmdlines(map));
50d88758
TG
1612 else
1613 strcpy(comm, "<...>");
4c27e756
SRRH
1614}
1615
1616void trace_find_cmdline(int pid, char comm[])
1617{
1618 preempt_disable();
1619 arch_spin_lock(&trace_cmdline_lock);
1620
1621 __trace_find_cmdline(pid, comm);
bc0c38d1 1622
0199c4e6 1623 arch_spin_unlock(&trace_cmdline_lock);
5b6045a9 1624 preempt_enable();
bc0c38d1
SR
1625}
1626
e309b41d 1627void tracing_record_cmdline(struct task_struct *tsk)
bc0c38d1 1628{
0fb9656d 1629 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
bc0c38d1
SR
1630 return;
1631
7ffbd48d
SR
1632 if (!__this_cpu_read(trace_cmdline_save))
1633 return;
1634
379cfdac
SRRH
1635 if (trace_save_cmdline(tsk))
1636 __this_cpu_write(trace_cmdline_save, false);
bc0c38d1
SR
1637}
1638
45dcd8b8 1639void
38697053
SR
1640tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1641 int pc)
bc0c38d1
SR
1642{
1643 struct task_struct *tsk = current;
bc0c38d1 1644
777e208d
SR
1645 entry->preempt_count = pc & 0xff;
1646 entry->pid = (tsk) ? tsk->pid : 0;
1647 entry->flags =
9244489a 1648#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2e2ca155 1649 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
9244489a
SR
1650#else
1651 TRACE_FLAG_IRQS_NOSUPPORT |
1652#endif
bc0c38d1
SR
1653 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1654 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
e5137b50
PZ
1655 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1656 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
bc0c38d1 1657}
f413cdb8 1658EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
bc0c38d1 1659
e77405ad
SR
1660struct ring_buffer_event *
1661trace_buffer_lock_reserve(struct ring_buffer *buffer,
1662 int type,
1663 unsigned long len,
1664 unsigned long flags, int pc)
51a763dd
ACM
1665{
1666 struct ring_buffer_event *event;
1667
e77405ad 1668 event = ring_buffer_lock_reserve(buffer, len);
51a763dd
ACM
1669 if (event != NULL) {
1670 struct trace_entry *ent = ring_buffer_event_data(event);
1671
1672 tracing_generic_entry_update(ent, flags, pc);
1673 ent->type = type;
1674 }
1675
1676 return event;
1677}
51a763dd 1678
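/*
 * The usual pattern built on top of trace_buffer_lock_reserve() is:
 * reserve an event of the right type and size, fill in the
 * type-specific fields returned by ring_buffer_event_data(), and then
 * commit it with __buffer_unlock_commit() (see trace_function() below
 * for a concrete example).
 */
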
7ffbd48d
SR
1679void
1680__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1681{
1682 __this_cpu_write(trace_cmdline_save, true);
1683 ring_buffer_unlock_commit(buffer, event);
1684}
1685
b7f0c959
SRRH
1686void trace_buffer_unlock_commit(struct trace_array *tr,
1687 struct ring_buffer *buffer,
1688 struct ring_buffer_event *event,
1689 unsigned long flags, int pc)
51a763dd 1690{
7ffbd48d 1691 __buffer_unlock_commit(buffer, event);
51a763dd 1692
73dddbb5 1693 ftrace_trace_stack(buffer, flags, 6, pc, NULL);
e77405ad 1694 ftrace_trace_userstack(buffer, flags, pc);
07edf712 1695}
0d5c6e1c 1696EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
51a763dd 1697
2c4a33ab
SRRH
1698static struct ring_buffer *temp_buffer;
1699
ccb469a1
SR
1700struct ring_buffer_event *
1701trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
7f1d2f82 1702 struct trace_event_file *trace_file,
ccb469a1
SR
1703 int type, unsigned long len,
1704 unsigned long flags, int pc)
1705{
2c4a33ab
SRRH
1706 struct ring_buffer_event *entry;
1707
7f1d2f82 1708 *current_rb = trace_file->tr->trace_buffer.buffer;
2c4a33ab 1709 entry = trace_buffer_lock_reserve(*current_rb,
ccb469a1 1710 type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
5d6ad960 1717 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2c4a33ab
SRRH
1718 *current_rb = temp_buffer;
1719 entry = trace_buffer_lock_reserve(*current_rb,
1720 type, len, flags, pc);
1721 }
1722 return entry;
ccb469a1
SR
1723}
1724EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1725
ef5580d0 1726struct ring_buffer_event *
e77405ad
SR
1727trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1728 int type, unsigned long len,
ef5580d0
SR
1729 unsigned long flags, int pc)
1730{
12883efb 1731 *current_rb = global_trace.trace_buffer.buffer;
e77405ad 1732 return trace_buffer_lock_reserve(*current_rb,
ef5580d0
SR
1733 type, len, flags, pc);
1734}
94487d6d 1735EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
ef5580d0 1736
b7f0c959
SRRH
1737void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1738 struct ring_buffer *buffer,
0d5c6e1c
SR
1739 struct ring_buffer_event *event,
1740 unsigned long flags, int pc,
1741 struct pt_regs *regs)
1fd8df2c 1742{
7ffbd48d 1743 __buffer_unlock_commit(buffer, event);
1fd8df2c 1744
73dddbb5 1745 ftrace_trace_stack(buffer, flags, 6, pc, regs);
1fd8df2c
MH
1746 ftrace_trace_userstack(buffer, flags, pc);
1747}
0d5c6e1c 1748EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1fd8df2c 1749
e77405ad
SR
1750void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1751 struct ring_buffer_event *event)
77d9f465 1752{
e77405ad 1753 ring_buffer_discard_commit(buffer, event);
ef5580d0 1754}
12acd473 1755EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
ef5580d0 1756
e309b41d 1757void
7be42151 1758trace_function(struct trace_array *tr,
38697053
SR
1759 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1760 int pc)
bc0c38d1 1761{
2425bcb9 1762 struct trace_event_call *call = &event_function;
12883efb 1763 struct ring_buffer *buffer = tr->trace_buffer.buffer;
3928a8a2 1764 struct ring_buffer_event *event;
777e208d 1765 struct ftrace_entry *entry;
bc0c38d1 1766
d769041f 1767 /* If we are reading the ring buffer, don't trace */
dd17c8f7 1768 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
d769041f
SR
1769 return;
1770
e77405ad 1771 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
51a763dd 1772 flags, pc);
3928a8a2
SR
1773 if (!event)
1774 return;
1775 entry = ring_buffer_event_data(event);
777e208d
SR
1776 entry->ip = ip;
1777 entry->parent_ip = parent_ip;
e1112b4d 1778
f306cc82 1779 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1780 __buffer_unlock_commit(buffer, event);
bc0c38d1
SR
1781}
1782
c0a0d0d3 1783#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
1784
1785#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1786struct ftrace_stack {
1787 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1788};
1789
1790static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1791static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1792
e77405ad 1793static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 1794 unsigned long flags,
1fd8df2c 1795 int skip, int pc, struct pt_regs *regs)
86387f7e 1796{
2425bcb9 1797 struct trace_event_call *call = &event_kernel_stack;
3928a8a2 1798 struct ring_buffer_event *event;
777e208d 1799 struct stack_entry *entry;
86387f7e 1800 struct stack_trace trace;
4a9bd3f1
SR
1801 int use_stack;
1802 int size = FTRACE_STACK_ENTRIES;
1803
1804 trace.nr_entries = 0;
1805 trace.skip = skip;
1806
1807 /*
1808 * Since events can happen in NMIs there's no safe way to
1809 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1810 * or NMI comes in, it will just have to use the default
1811	 * FTRACE_STACK_ENTRIES.
1812 */
1813 preempt_disable_notrace();
1814
82146529 1815 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
1816 /*
1817 * We don't need any atomic variables, just a barrier.
1818 * If an interrupt comes in, we don't care, because it would
1819 * have exited and put the counter back to what we want.
1820 * We just need a barrier to keep gcc from moving things
1821 * around.
1822 */
1823 barrier();
1824 if (use_stack == 1) {
bdffd893 1825 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
1826 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1827
1828 if (regs)
1829 save_stack_trace_regs(regs, &trace);
1830 else
1831 save_stack_trace(&trace);
1832
1833 if (trace.nr_entries > size)
1834 size = trace.nr_entries;
1835 } else
1836 /* From now on, use_stack is a boolean */
1837 use_stack = 0;
1838
1839 size *= sizeof(unsigned long);
86387f7e 1840
e77405ad 1841 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
4a9bd3f1 1842 sizeof(*entry) + size, flags, pc);
3928a8a2 1843 if (!event)
4a9bd3f1
SR
1844 goto out;
1845 entry = ring_buffer_event_data(event);
86387f7e 1846
4a9bd3f1
SR
1847 memset(&entry->caller, 0, size);
1848
1849 if (use_stack)
1850 memcpy(&entry->caller, trace.entries,
1851 trace.nr_entries * sizeof(unsigned long));
1852 else {
1853 trace.max_entries = FTRACE_STACK_ENTRIES;
1854 trace.entries = entry->caller;
1855 if (regs)
1856 save_stack_trace_regs(regs, &trace);
1857 else
1858 save_stack_trace(&trace);
1859 }
1860
1861 entry->size = trace.nr_entries;
86387f7e 1862
f306cc82 1863 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1864 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
1865
1866 out:
1867 /* Again, don't let gcc optimize things here */
1868 barrier();
82146529 1869 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
1870 preempt_enable_notrace();
1871
f0a920d5
IM
1872}
1873
73dddbb5
SRRH
1874static inline void ftrace_trace_stack(struct ring_buffer *buffer,
1875 unsigned long flags,
1876 int skip, int pc, struct pt_regs *regs)
53614991
SR
1877{
1878 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1879 return;
1880
73dddbb5 1881 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
1882}
1883
c0a0d0d3
FW
1884void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1885 int pc)
38697053 1886{
12883efb 1887 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
38697053
SR
1888}
1889
03889384
SR
1890/**
1891 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 1892 * @skip: Number of functions to skip (helper handlers)
03889384 1893 */
c142be8e 1894void trace_dump_stack(int skip)
03889384
SR
1895{
1896 unsigned long flags;
1897
1898 if (tracing_disabled || tracing_selftest_running)
e36c5458 1899 return;
03889384
SR
1900
1901 local_save_flags(flags);
1902
c142be8e
SRRH
1903 /*
1904	 * Skip 3 more, seems to get us to the caller of
1905 * this function.
1906 */
1907 skip += 3;
1908 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1909 flags, skip, preempt_count(), NULL);
03889384
SR
1910}
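
/*
 * A minimal usage sketch, not part of this file: any kernel code can ask
 * for its own backtrace to be recorded in the trace buffer by calling
 * trace_dump_stack() (declared in <linux/kernel.h> when CONFIG_TRACING is
 * set). The function name example_report_state() below is made up for
 * illustration only.
 */
#if 0	/* illustrative example, not compiled */
static void example_report_state(void)
{
	/* Record the caller's stack; no extra helper frames to skip. */
	trace_dump_stack(0);
}
#endif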
1911
91e86e56
SR
1912static DEFINE_PER_CPU(int, user_stack_count);
1913
e77405ad
SR
1914void
1915ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 1916{
2425bcb9 1917 struct trace_event_call *call = &event_user_stack;
8d7c6a96 1918 struct ring_buffer_event *event;
02b67518
TE
1919 struct userstack_entry *entry;
1920 struct stack_trace trace;
02b67518
TE
1921
1922 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1923 return;
1924
b6345879
SR
1925 /*
1926	 * NMIs cannot handle page faults, even with fixups.
1927	 * Saving the user stack can (and often does) fault.
1928 */
1929 if (unlikely(in_nmi()))
1930 return;
02b67518 1931
91e86e56
SR
1932 /*
1933 * prevent recursion, since the user stack tracing may
1934 * trigger other kernel events.
1935 */
1936 preempt_disable();
1937 if (__this_cpu_read(user_stack_count))
1938 goto out;
1939
1940 __this_cpu_inc(user_stack_count);
1941
e77405ad 1942 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
51a763dd 1943 sizeof(*entry), flags, pc);
02b67518 1944 if (!event)
1dbd1951 1945 goto out_drop_count;
02b67518 1946 entry = ring_buffer_event_data(event);
02b67518 1947
48659d31 1948 entry->tgid = current->tgid;
02b67518
TE
1949 memset(&entry->caller, 0, sizeof(entry->caller));
1950
1951 trace.nr_entries = 0;
1952 trace.max_entries = FTRACE_STACK_ENTRIES;
1953 trace.skip = 0;
1954 trace.entries = entry->caller;
1955
1956 save_stack_trace_user(&trace);
f306cc82 1957 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1958 __buffer_unlock_commit(buffer, event);
91e86e56 1959
1dbd1951 1960 out_drop_count:
91e86e56 1961 __this_cpu_dec(user_stack_count);
91e86e56
SR
1962 out:
1963 preempt_enable();
02b67518
TE
1964}
1965
4fd27358
HE
1966#ifdef UNUSED
1967static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 1968{
7be42151 1969 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 1970}
4fd27358 1971#endif /* UNUSED */
02b67518 1972
c0a0d0d3
FW
1973#endif /* CONFIG_STACKTRACE */
1974
07d777fe
SR
1975/* created for use with alloc_percpu */
1976struct trace_buffer_struct {
1977 char buffer[TRACE_BUF_SIZE];
1978};
1979
1980static struct trace_buffer_struct *trace_percpu_buffer;
1981static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1982static struct trace_buffer_struct *trace_percpu_irq_buffer;
1983static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1984
1985/*
1986 * The buffer used is dependent on the context. There is a per cpu
1987 * buffer for normal context, softirq context, hard irq context and
1988 * for NMI context. This allows for lockless recording.
1989 *
1990 * Note: if the buffers failed to be allocated, then this returns NULL.
1991 */
1992static char *get_trace_buf(void)
1993{
1994 struct trace_buffer_struct *percpu_buffer;
07d777fe
SR
1995
1996 /*
1997 * If we have allocated per cpu buffers, then we do not
1998 * need to do any locking.
1999 */
2000 if (in_nmi())
2001 percpu_buffer = trace_percpu_nmi_buffer;
2002 else if (in_irq())
2003 percpu_buffer = trace_percpu_irq_buffer;
2004 else if (in_softirq())
2005 percpu_buffer = trace_percpu_sirq_buffer;
2006 else
2007 percpu_buffer = trace_percpu_buffer;
2008
2009 if (!percpu_buffer)
2010 return NULL;
2011
d8a0349c 2012 return this_cpu_ptr(&percpu_buffer->buffer[0]);
07d777fe
SR
2013}
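
/*
 * Example of what the per-context split above buys: if an NMI fires while
 * trace_vprintk() on the same CPU is still formatting into the normal
 * context buffer, the NMI's own printk picks trace_percpu_nmi_buffer
 * instead, so neither context overwrites the other's half-built string
 * and no locking is needed.
 */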
2014
2015static int alloc_percpu_trace_buffer(void)
2016{
2017 struct trace_buffer_struct *buffers;
2018 struct trace_buffer_struct *sirq_buffers;
2019 struct trace_buffer_struct *irq_buffers;
2020 struct trace_buffer_struct *nmi_buffers;
2021
2022 buffers = alloc_percpu(struct trace_buffer_struct);
2023 if (!buffers)
2024 goto err_warn;
2025
2026 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2027 if (!sirq_buffers)
2028 goto err_sirq;
2029
2030 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2031 if (!irq_buffers)
2032 goto err_irq;
2033
2034 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2035 if (!nmi_buffers)
2036 goto err_nmi;
2037
2038 trace_percpu_buffer = buffers;
2039 trace_percpu_sirq_buffer = sirq_buffers;
2040 trace_percpu_irq_buffer = irq_buffers;
2041 trace_percpu_nmi_buffer = nmi_buffers;
2042
2043 return 0;
2044
2045 err_nmi:
2046 free_percpu(irq_buffers);
2047 err_irq:
2048 free_percpu(sirq_buffers);
2049 err_sirq:
2050 free_percpu(buffers);
2051 err_warn:
2052 WARN(1, "Could not allocate percpu trace_printk buffer");
2053 return -ENOMEM;
2054}
2055
81698831
SR
2056static int buffers_allocated;
2057
07d777fe
SR
2058void trace_printk_init_buffers(void)
2059{
07d777fe
SR
2060 if (buffers_allocated)
2061 return;
2062
2063 if (alloc_percpu_trace_buffer())
2064 return;
2065
2184db46
SR
2066 /* trace_printk() is for debug use only. Don't use it in production. */
2067
69a1c994
BP
2068 pr_warning("\n");
2069 pr_warning("**********************************************************\n");
2184db46
SR
2070 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2071 pr_warning("** **\n");
2072 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2073 pr_warning("** **\n");
2074 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
eff264ef 2075 pr_warning("** unsafe for production use. **\n");
2184db46
SR
2076 pr_warning("** **\n");
2077 pr_warning("** If you see this message and you are not debugging **\n");
2078 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2079 pr_warning("** **\n");
2080 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2081 pr_warning("**********************************************************\n");
07d777fe 2082
b382ede6
SR
2083 /* Expand the buffers to set size */
2084 tracing_update_buffers();
2085
07d777fe 2086 buffers_allocated = 1;
81698831
SR
2087
2088 /*
2089 * trace_printk_init_buffers() can be called by modules.
2090 * If that happens, then we need to start cmdline recording
2091	 * directly here. If the global_trace.trace_buffer.buffer is
2092	 * already allocated here, then this was called by module code.
2093 */
12883efb 2094 if (global_trace.trace_buffer.buffer)
81698831
SR
2095 tracing_start_cmdline_record();
2096}
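
/*
 * A minimal sketch of how this gets triggered from a module (the module
 * and its example_init() function are made up for illustration). As the
 * comment in trace_printk_init_buffers() above notes, a module that uses
 * trace_printk() causes trace_printk_init_buffers() to be called at
 * module load time, so the per cpu buffers exist before the module's
 * first trace_printk() runs.
 */
#if 0	/* illustrative example, not compiled */
#include <linux/module.h>
#include <linux/kernel.h>

static int __init example_init(void)
{
	/* First trace_printk() user in a module forces buffer allocation */
	trace_printk("example module loaded\n");
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");
#endif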
2097
2098void trace_printk_start_comm(void)
2099{
2100 /* Start tracing comms if trace printk is set */
2101 if (!buffers_allocated)
2102 return;
2103 tracing_start_cmdline_record();
2104}
2105
2106static void trace_printk_start_stop_comm(int enabled)
2107{
2108 if (!buffers_allocated)
2109 return;
2110
2111 if (enabled)
2112 tracing_start_cmdline_record();
2113 else
2114 tracing_stop_cmdline_record();
07d777fe
SR
2115}
2116
769b0441 2117/**
48ead020 2118 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
2119 * @ip: The address of the caller
 * @fmt: The printf format of the message
 * @args: The va_list of arguments to format
2120 */
40ce74f1 2121int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2122{
2425bcb9 2123 struct trace_event_call *call = &event_bprint;
769b0441 2124 struct ring_buffer_event *event;
e77405ad 2125 struct ring_buffer *buffer;
769b0441 2126 struct trace_array *tr = &global_trace;
48ead020 2127 struct bprint_entry *entry;
769b0441 2128 unsigned long flags;
07d777fe
SR
2129 char *tbuffer;
2130 int len = 0, size, pc;
769b0441
FW
2131
2132 if (unlikely(tracing_selftest_running || tracing_disabled))
2133 return 0;
2134
2135 /* Don't pollute graph traces with trace_vprintk internals */
2136 pause_graph_tracing();
2137
2138 pc = preempt_count();
5168ae50 2139 preempt_disable_notrace();
769b0441 2140
07d777fe
SR
2141 tbuffer = get_trace_buf();
2142 if (!tbuffer) {
2143 len = 0;
769b0441 2144 goto out;
07d777fe 2145 }
769b0441 2146
07d777fe 2147 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2148
07d777fe
SR
2149 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2150 goto out;
769b0441 2151
07d777fe 2152 local_save_flags(flags);
769b0441 2153 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2154 buffer = tr->trace_buffer.buffer;
e77405ad
SR
2155 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2156 flags, pc);
769b0441 2157 if (!event)
07d777fe 2158 goto out;
769b0441
FW
2159 entry = ring_buffer_event_data(event);
2160 entry->ip = ip;
769b0441
FW
2161 entry->fmt = fmt;
2162
07d777fe 2163 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2164 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2165 __buffer_unlock_commit(buffer, event);
73dddbb5 2166 ftrace_trace_stack(buffer, flags, 6, pc, NULL);
d931369b 2167 }
769b0441 2168
769b0441 2169out:
5168ae50 2170 preempt_enable_notrace();
769b0441
FW
2171 unpause_graph_tracing();
2172
2173 return len;
2174}
48ead020
FW
2175EXPORT_SYMBOL_GPL(trace_vbprintk);
2176
12883efb
SRRH
2177static int
2178__trace_array_vprintk(struct ring_buffer *buffer,
2179 unsigned long ip, const char *fmt, va_list args)
48ead020 2180{
2425bcb9 2181 struct trace_event_call *call = &event_print;
48ead020 2182 struct ring_buffer_event *event;
07d777fe 2183 int len = 0, size, pc;
48ead020 2184 struct print_entry *entry;
07d777fe
SR
2185 unsigned long flags;
2186 char *tbuffer;
48ead020
FW
2187
2188 if (tracing_disabled || tracing_selftest_running)
2189 return 0;
2190
07d777fe
SR
2191 /* Don't pollute graph traces with trace_vprintk internals */
2192 pause_graph_tracing();
2193
48ead020
FW
2194 pc = preempt_count();
2195 preempt_disable_notrace();
48ead020 2196
07d777fe
SR
2197
2198 tbuffer = get_trace_buf();
2199 if (!tbuffer) {
2200 len = 0;
48ead020 2201 goto out;
07d777fe 2202 }
48ead020 2203
3558a5ac 2204 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2205
07d777fe 2206 local_save_flags(flags);
48ead020 2207 size = sizeof(*entry) + len + 1;
e77405ad 2208 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
07d777fe 2209 flags, pc);
48ead020 2210 if (!event)
07d777fe 2211 goto out;
48ead020 2212 entry = ring_buffer_event_data(event);
c13d2f7c 2213 entry->ip = ip;
48ead020 2214
3558a5ac 2215 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 2216 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2217 __buffer_unlock_commit(buffer, event);
73dddbb5 2218 ftrace_trace_stack(buffer, flags, 6, pc, NULL);
d931369b 2219 }
48ead020
FW
2220 out:
2221 preempt_enable_notrace();
07d777fe 2222 unpause_graph_tracing();
48ead020
FW
2223
2224 return len;
2225}
659372d3 2226
12883efb
SRRH
2227int trace_array_vprintk(struct trace_array *tr,
2228 unsigned long ip, const char *fmt, va_list args)
2229{
2230 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2231}
2232
2233int trace_array_printk(struct trace_array *tr,
2234 unsigned long ip, const char *fmt, ...)
2235{
2236 int ret;
2237 va_list ap;
2238
2239 if (!(trace_flags & TRACE_ITER_PRINTK))
2240 return 0;
2241
2242 va_start(ap, fmt);
2243 ret = trace_array_vprintk(tr, ip, fmt, ap);
2244 va_end(ap);
2245 return ret;
2246}
2247
2248int trace_array_printk_buf(struct ring_buffer *buffer,
2249 unsigned long ip, const char *fmt, ...)
2250{
2251 int ret;
2252 va_list ap;
2253
2254 if (!(trace_flags & TRACE_ITER_PRINTK))
2255 return 0;
2256
2257 va_start(ap, fmt);
2258 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2259 va_end(ap);
2260 return ret;
2261}
2262
659372d3
SR
2263int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2264{
a813a159 2265 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 2266}
769b0441
FW
2267EXPORT_SYMBOL_GPL(trace_vprintk);
2268
e2ac8ef5 2269static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 2270{
6d158a81
SR
2271 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2272
5a90f577 2273 iter->idx++;
6d158a81
SR
2274 if (buf_iter)
2275 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
2276}
2277
e309b41d 2278static struct trace_entry *
bc21b478
SR
2279peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2280 unsigned long *lost_events)
dd0e545f 2281{
3928a8a2 2282 struct ring_buffer_event *event;
6d158a81 2283 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 2284
d769041f
SR
2285 if (buf_iter)
2286 event = ring_buffer_iter_peek(buf_iter, ts);
2287 else
12883efb 2288 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 2289 lost_events);
d769041f 2290
4a9bd3f1
SR
2291 if (event) {
2292 iter->ent_size = ring_buffer_event_length(event);
2293 return ring_buffer_event_data(event);
2294 }
2295 iter->ent_size = 0;
2296 return NULL;
dd0e545f 2297}
d769041f 2298
dd0e545f 2299static struct trace_entry *
bc21b478
SR
2300__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2301 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 2302{
12883efb 2303 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 2304 struct trace_entry *ent, *next = NULL;
aa27497c 2305 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 2306 int cpu_file = iter->cpu_file;
3928a8a2 2307 u64 next_ts = 0, ts;
bc0c38d1 2308 int next_cpu = -1;
12b5da34 2309 int next_size = 0;
bc0c38d1
SR
2310 int cpu;
2311
b04cc6b1
FW
2312 /*
2313	 * If we are in a per_cpu trace file, don't bother iterating over
2314	 * all cpus; just peek directly.
2315 */
ae3b5093 2316 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
2317 if (ring_buffer_empty_cpu(buffer, cpu_file))
2318 return NULL;
bc21b478 2319 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
2320 if (ent_cpu)
2321 *ent_cpu = cpu_file;
2322
2323 return ent;
2324 }
2325
ab46428c 2326 for_each_tracing_cpu(cpu) {
dd0e545f 2327
3928a8a2
SR
2328 if (ring_buffer_empty_cpu(buffer, cpu))
2329 continue;
dd0e545f 2330
bc21b478 2331 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 2332
cdd31cd2
IM
2333 /*
2334 * Pick the entry with the smallest timestamp:
2335 */
3928a8a2 2336 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
2337 next = ent;
2338 next_cpu = cpu;
3928a8a2 2339 next_ts = ts;
bc21b478 2340 next_lost = lost_events;
12b5da34 2341 next_size = iter->ent_size;
bc0c38d1
SR
2342 }
2343 }
2344
12b5da34
SR
2345 iter->ent_size = next_size;
2346
bc0c38d1
SR
2347 if (ent_cpu)
2348 *ent_cpu = next_cpu;
2349
3928a8a2
SR
2350 if (ent_ts)
2351 *ent_ts = next_ts;
2352
bc21b478
SR
2353 if (missing_events)
2354 *missing_events = next_lost;
2355
bc0c38d1
SR
2356 return next;
2357}
2358
dd0e545f 2359/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
2360struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2361 int *ent_cpu, u64 *ent_ts)
bc0c38d1 2362{
bc21b478 2363 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
2364}
2365
2366/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 2367void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 2368{
bc21b478
SR
2369 iter->ent = __find_next_entry(iter, &iter->cpu,
2370 &iter->lost_events, &iter->ts);
dd0e545f 2371
3928a8a2 2372 if (iter->ent)
e2ac8ef5 2373 trace_iterator_increment(iter);
dd0e545f 2374
3928a8a2 2375 return iter->ent ? iter : NULL;
b3806b43 2376}
bc0c38d1 2377
e309b41d 2378static void trace_consume(struct trace_iterator *iter)
b3806b43 2379{
12883efb 2380 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 2381 &iter->lost_events);
bc0c38d1
SR
2382}
2383
e309b41d 2384static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
2385{
2386 struct trace_iterator *iter = m->private;
bc0c38d1 2387 int i = (int)*pos;
4e3c3333 2388 void *ent;
bc0c38d1 2389
a63ce5b3
SR
2390 WARN_ON_ONCE(iter->leftover);
2391
bc0c38d1
SR
2392 (*pos)++;
2393
2394 /* can't go backwards */
2395 if (iter->idx > i)
2396 return NULL;
2397
2398 if (iter->idx < 0)
955b61e5 2399 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2400 else
2401 ent = iter;
2402
2403 while (ent && iter->idx < i)
955b61e5 2404 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2405
2406 iter->pos = *pos;
2407
bc0c38d1
SR
2408 return ent;
2409}
2410
955b61e5 2411void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 2412{
2f26ebd5
SR
2413 struct ring_buffer_event *event;
2414 struct ring_buffer_iter *buf_iter;
2415 unsigned long entries = 0;
2416 u64 ts;
2417
12883efb 2418 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 2419
6d158a81
SR
2420 buf_iter = trace_buffer_iter(iter, cpu);
2421 if (!buf_iter)
2f26ebd5
SR
2422 return;
2423
2f26ebd5
SR
2424 ring_buffer_iter_reset(buf_iter);
2425
2426 /*
2427	 * With the max latency tracers, it is possible that a reset
2428	 * never took place on a cpu. This is evident
2429 * by the timestamp being before the start of the buffer.
2430 */
2431 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 2432 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
2433 break;
2434 entries++;
2435 ring_buffer_read(buf_iter, NULL);
2436 }
2437
12883efb 2438 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
2439}
2440
d7350c3f 2441/*
d7350c3f
FW
2442 * The current tracer is copied to avoid taking a global lock
2443 * all around.
2444 */
bc0c38d1
SR
2445static void *s_start(struct seq_file *m, loff_t *pos)
2446{
2447 struct trace_iterator *iter = m->private;
2b6080f2 2448 struct trace_array *tr = iter->tr;
b04cc6b1 2449 int cpu_file = iter->cpu_file;
bc0c38d1
SR
2450 void *p = NULL;
2451 loff_t l = 0;
3928a8a2 2452 int cpu;
bc0c38d1 2453
2fd196ec
HT
2454 /*
2455 * copy the tracer to avoid using a global lock all around.
2456 * iter->trace is a copy of current_trace, the pointer to the
2457 * name may be used instead of a strcmp(), as iter->trace->name
2458 * will point to the same string as current_trace->name.
2459 */
bc0c38d1 2460 mutex_lock(&trace_types_lock);
2b6080f2
SR
2461 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2462 *iter->trace = *tr->current_trace;
d7350c3f 2463 mutex_unlock(&trace_types_lock);
bc0c38d1 2464
12883efb 2465#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2466 if (iter->snapshot && iter->trace->use_max_tr)
2467 return ERR_PTR(-EBUSY);
12883efb 2468#endif
debdd57f
HT
2469
2470 if (!iter->snapshot)
2471 atomic_inc(&trace_record_cmdline_disabled);
bc0c38d1 2472
bc0c38d1
SR
2473 if (*pos != iter->pos) {
2474 iter->ent = NULL;
2475 iter->cpu = 0;
2476 iter->idx = -1;
2477
ae3b5093 2478 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2479 for_each_tracing_cpu(cpu)
2f26ebd5 2480 tracing_iter_reset(iter, cpu);
b04cc6b1 2481 } else
2f26ebd5 2482 tracing_iter_reset(iter, cpu_file);
bc0c38d1 2483
ac91d854 2484 iter->leftover = 0;
bc0c38d1
SR
2485 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2486 ;
2487
2488 } else {
a63ce5b3
SR
2489 /*
2490 * If we overflowed the seq_file before, then we want
2491 * to just reuse the trace_seq buffer again.
2492 */
2493 if (iter->leftover)
2494 p = iter;
2495 else {
2496 l = *pos - 1;
2497 p = s_next(m, p, &l);
2498 }
bc0c38d1
SR
2499 }
2500
4f535968 2501 trace_event_read_lock();
7e53bd42 2502 trace_access_lock(cpu_file);
bc0c38d1
SR
2503 return p;
2504}
2505
2506static void s_stop(struct seq_file *m, void *p)
2507{
7e53bd42
LJ
2508 struct trace_iterator *iter = m->private;
2509
12883efb 2510#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2511 if (iter->snapshot && iter->trace->use_max_tr)
2512 return;
12883efb 2513#endif
debdd57f
HT
2514
2515 if (!iter->snapshot)
2516 atomic_dec(&trace_record_cmdline_disabled);
12883efb 2517
7e53bd42 2518 trace_access_unlock(iter->cpu_file);
4f535968 2519 trace_event_read_unlock();
bc0c38d1
SR
2520}
2521
39eaf7ef 2522static void
12883efb
SRRH
2523get_total_entries(struct trace_buffer *buf,
2524 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
2525{
2526 unsigned long count;
2527 int cpu;
2528
2529 *total = 0;
2530 *entries = 0;
2531
2532 for_each_tracing_cpu(cpu) {
12883efb 2533 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
2534 /*
2535 * If this buffer has skipped entries, then we hold all
2536 * entries for the trace and we need to ignore the
2537 * ones before the time stamp.
2538 */
12883efb
SRRH
2539 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2540 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
2541 /* total is the same as the entries */
2542 *total += count;
2543 } else
2544 *total += count +
12883efb 2545 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
2546 *entries += count;
2547 }
2548}
2549
e309b41d 2550static void print_lat_help_header(struct seq_file *m)
bc0c38d1 2551{
d79ac28f
RV
2552 seq_puts(m, "# _------=> CPU# \n"
2553 "# / _-----=> irqs-off \n"
2554 "# | / _----=> need-resched \n"
2555 "# || / _---=> hardirq/softirq \n"
2556 "# ||| / _--=> preempt-depth \n"
2557 "# |||| / delay \n"
2558 "# cmd pid ||||| time | caller \n"
2559 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
2560}
2561
12883efb 2562static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 2563{
39eaf7ef
SR
2564 unsigned long total;
2565 unsigned long entries;
2566
12883efb 2567 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
2568 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2569 entries, total, num_online_cpus());
2570 seq_puts(m, "#\n");
2571}
2572
12883efb 2573static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
39eaf7ef 2574{
12883efb 2575 print_event_info(buf, m);
d79ac28f
RV
2576 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2577 "# | | | | |\n");
bc0c38d1
SR
2578}
2579
12883efb 2580static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
77271ce4 2581{
12883efb 2582 print_event_info(buf, m);
d79ac28f
RV
2583 seq_puts(m, "# _-----=> irqs-off\n"
2584 "# / _----=> need-resched\n"
2585 "# | / _---=> hardirq/softirq\n"
2586 "# || / _--=> preempt-depth\n"
2587 "# ||| / delay\n"
2588 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2589 "# | | | |||| | |\n");
77271ce4 2590}
bc0c38d1 2591
62b915f1 2592void
bc0c38d1
SR
2593print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2594{
2595 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
2596 struct trace_buffer *buf = iter->trace_buffer;
2597 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 2598 struct tracer *type = iter->trace;
39eaf7ef
SR
2599 unsigned long entries;
2600 unsigned long total;
bc0c38d1
SR
2601 const char *name = "preemption";
2602
d840f718 2603 name = type->name;
bc0c38d1 2604
12883efb 2605 get_total_entries(buf, &total, &entries);
bc0c38d1 2606
888b55dc 2607 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 2608 name, UTS_RELEASE);
888b55dc 2609 seq_puts(m, "# -----------------------------------"
bc0c38d1 2610 "---------------------------------\n");
888b55dc 2611 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 2612 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 2613 nsecs_to_usecs(data->saved_latency),
bc0c38d1 2614 entries,
4c11d7ae 2615 total,
12883efb 2616 buf->cpu,
bc0c38d1
SR
2617#if defined(CONFIG_PREEMPT_NONE)
2618 "server",
2619#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2620 "desktop",
b5c21b45 2621#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
2622 "preempt",
2623#else
2624 "unknown",
2625#endif
2626 /* These are reserved for later use */
2627 0, 0, 0, 0);
2628#ifdef CONFIG_SMP
2629 seq_printf(m, " #P:%d)\n", num_online_cpus());
2630#else
2631 seq_puts(m, ")\n");
2632#endif
888b55dc
KM
2633 seq_puts(m, "# -----------------\n");
2634 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 2635 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
2636 data->comm, data->pid,
2637 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 2638 data->policy, data->rt_priority);
888b55dc 2639 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
2640
2641 if (data->critical_start) {
888b55dc 2642 seq_puts(m, "# => started at: ");
214023c3
SR
2643 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2644 trace_print_seq(m, &iter->seq);
888b55dc 2645 seq_puts(m, "\n# => ended at: ");
214023c3
SR
2646 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2647 trace_print_seq(m, &iter->seq);
8248ac05 2648 seq_puts(m, "\n#\n");
bc0c38d1
SR
2649 }
2650
888b55dc 2651 seq_puts(m, "#\n");
bc0c38d1
SR
2652}
2653
a309720c
SR
2654static void test_cpu_buff_start(struct trace_iterator *iter)
2655{
2656 struct trace_seq *s = &iter->seq;
2657
12ef7d44
SR
2658 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2659 return;
2660
2661 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2662 return;
2663
4462344e 2664 if (cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
2665 return;
2666
12883efb 2667 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
2668 return;
2669
4462344e 2670 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
2671
2672 /* Don't print started cpu buffer for the first entry of the trace */
2673 if (iter->idx > 1)
2674 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2675 iter->cpu);
a309720c
SR
2676}
2677
2c4f035f 2678static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 2679{
214023c3 2680 struct trace_seq *s = &iter->seq;
bc0c38d1 2681 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 2682 struct trace_entry *entry;
f633cef0 2683 struct trace_event *event;
bc0c38d1 2684
4e3c3333 2685 entry = iter->ent;
dd0e545f 2686
a309720c
SR
2687 test_cpu_buff_start(iter);
2688
c4a8e8be 2689 event = ftrace_find_event(entry->type);
bc0c38d1 2690
c4a8e8be 2691 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2692 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2693 trace_print_lat_context(iter);
2694 else
2695 trace_print_context(iter);
c4a8e8be 2696 }
bc0c38d1 2697
19a7fe20
SRRH
2698 if (trace_seq_has_overflowed(s))
2699 return TRACE_TYPE_PARTIAL_LINE;
2700
268ccda0 2701 if (event)
a9a57763 2702 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 2703
19a7fe20 2704 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 2705
19a7fe20 2706 return trace_handle_return(s);
bc0c38d1
SR
2707}
2708
2c4f035f 2709static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3
IM
2710{
2711 struct trace_seq *s = &iter->seq;
2712 struct trace_entry *entry;
f633cef0 2713 struct trace_event *event;
f9896bf3
IM
2714
2715 entry = iter->ent;
dd0e545f 2716
19a7fe20
SRRH
2717 if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2718 trace_seq_printf(s, "%d %d %llu ",
2719 entry->pid, iter->cpu, iter->ts);
2720
2721 if (trace_seq_has_overflowed(s))
2722 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 2723
f633cef0 2724 event = ftrace_find_event(entry->type);
268ccda0 2725 if (event)
a9a57763 2726 return event->funcs->raw(iter, 0, event);
d9793bd8 2727
19a7fe20 2728 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 2729
19a7fe20 2730 return trace_handle_return(s);
f9896bf3
IM
2731}
2732
2c4f035f 2733static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec
IM
2734{
2735 struct trace_seq *s = &iter->seq;
2736 unsigned char newline = '\n';
2737 struct trace_entry *entry;
f633cef0 2738 struct trace_event *event;
5e3ca0ec
IM
2739
2740 entry = iter->ent;
dd0e545f 2741
c4a8e8be 2742 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2743 SEQ_PUT_HEX_FIELD(s, entry->pid);
2744 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2745 SEQ_PUT_HEX_FIELD(s, iter->ts);
2746 if (trace_seq_has_overflowed(s))
2747 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2748 }
5e3ca0ec 2749
f633cef0 2750 event = ftrace_find_event(entry->type);
268ccda0 2751 if (event) {
a9a57763 2752 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
2753 if (ret != TRACE_TYPE_HANDLED)
2754 return ret;
2755 }
7104f300 2756
19a7fe20 2757 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 2758
19a7fe20 2759 return trace_handle_return(s);
5e3ca0ec
IM
2760}
2761
2c4f035f 2762static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa
IM
2763{
2764 struct trace_seq *s = &iter->seq;
2765 struct trace_entry *entry;
f633cef0 2766 struct trace_event *event;
cb0f12aa
IM
2767
2768 entry = iter->ent;
dd0e545f 2769
c4a8e8be 2770 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2771 SEQ_PUT_FIELD(s, entry->pid);
2772 SEQ_PUT_FIELD(s, iter->cpu);
2773 SEQ_PUT_FIELD(s, iter->ts);
2774 if (trace_seq_has_overflowed(s))
2775 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2776 }
cb0f12aa 2777
f633cef0 2778 event = ftrace_find_event(entry->type);
a9a57763
SR
2779 return event ? event->funcs->binary(iter, 0, event) :
2780 TRACE_TYPE_HANDLED;
cb0f12aa
IM
2781}
2782
62b915f1 2783int trace_empty(struct trace_iterator *iter)
bc0c38d1 2784{
6d158a81 2785 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
2786 int cpu;
2787
9aba60fe 2788 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 2789 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 2790 cpu = iter->cpu_file;
6d158a81
SR
2791 buf_iter = trace_buffer_iter(iter, cpu);
2792 if (buf_iter) {
2793 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
2794 return 0;
2795 } else {
12883efb 2796 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
2797 return 0;
2798 }
2799 return 1;
2800 }
2801
ab46428c 2802 for_each_tracing_cpu(cpu) {
6d158a81
SR
2803 buf_iter = trace_buffer_iter(iter, cpu);
2804 if (buf_iter) {
2805 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
2806 return 0;
2807 } else {
12883efb 2808 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
2809 return 0;
2810 }
bc0c38d1 2811 }
d769041f 2812
797d3712 2813 return 1;
bc0c38d1
SR
2814}
2815
4f535968 2816/* Called with trace_event_read_lock() held. */
955b61e5 2817enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 2818{
2c4f035f
FW
2819 enum print_line_t ret;
2820
19a7fe20
SRRH
2821 if (iter->lost_events) {
2822 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2823 iter->cpu, iter->lost_events);
2824 if (trace_seq_has_overflowed(&iter->seq))
2825 return TRACE_TYPE_PARTIAL_LINE;
2826 }
bc21b478 2827
2c4f035f
FW
2828 if (iter->trace && iter->trace->print_line) {
2829 ret = iter->trace->print_line(iter);
2830 if (ret != TRACE_TYPE_UNHANDLED)
2831 return ret;
2832 }
72829bc3 2833
09ae7234
SRRH
2834 if (iter->ent->type == TRACE_BPUTS &&
2835 trace_flags & TRACE_ITER_PRINTK &&
2836 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2837 return trace_print_bputs_msg_only(iter);
2838
48ead020
FW
2839 if (iter->ent->type == TRACE_BPRINT &&
2840 trace_flags & TRACE_ITER_PRINTK &&
2841 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2842 return trace_print_bprintk_msg_only(iter);
48ead020 2843
66896a85
FW
2844 if (iter->ent->type == TRACE_PRINT &&
2845 trace_flags & TRACE_ITER_PRINTK &&
2846 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2847 return trace_print_printk_msg_only(iter);
66896a85 2848
cb0f12aa
IM
2849 if (trace_flags & TRACE_ITER_BIN)
2850 return print_bin_fmt(iter);
2851
5e3ca0ec
IM
2852 if (trace_flags & TRACE_ITER_HEX)
2853 return print_hex_fmt(iter);
2854
f9896bf3
IM
2855 if (trace_flags & TRACE_ITER_RAW)
2856 return print_raw_fmt(iter);
2857
f9896bf3
IM
2858 return print_trace_fmt(iter);
2859}
2860
7e9a49ef
JO
2861void trace_latency_header(struct seq_file *m)
2862{
2863 struct trace_iterator *iter = m->private;
2864
2865 /* print nothing if the buffers are empty */
2866 if (trace_empty(iter))
2867 return;
2868
2869 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2870 print_trace_header(m, iter);
2871
2872 if (!(trace_flags & TRACE_ITER_VERBOSE))
2873 print_lat_help_header(m);
2874}
2875
62b915f1
JO
2876void trace_default_header(struct seq_file *m)
2877{
2878 struct trace_iterator *iter = m->private;
2879
f56e7f8e
JO
2880 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2881 return;
2882
62b915f1
JO
2883 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2884 /* print nothing if the buffers are empty */
2885 if (trace_empty(iter))
2886 return;
2887 print_trace_header(m, iter);
2888 if (!(trace_flags & TRACE_ITER_VERBOSE))
2889 print_lat_help_header(m);
2890 } else {
77271ce4
SR
2891 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2892 if (trace_flags & TRACE_ITER_IRQ_INFO)
12883efb 2893 print_func_help_header_irq(iter->trace_buffer, m);
77271ce4 2894 else
12883efb 2895 print_func_help_header(iter->trace_buffer, m);
77271ce4 2896 }
62b915f1
JO
2897 }
2898}
2899
e0a413f6
SR
2900static void test_ftrace_alive(struct seq_file *m)
2901{
2902 if (!ftrace_is_dead())
2903 return;
d79ac28f
RV
2904 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2905 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
2906}
2907
d8741e2e 2908#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 2909static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 2910{
d79ac28f
RV
2911 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2912 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2913 "# Takes a snapshot of the main buffer.\n"
2914 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2915 "# (Doesn't have to be '2' works with any number that\n"
2916 "# is not a '0' or '1')\n");
d8741e2e 2917}
f1affcaa
SRRH
2918
2919static void show_snapshot_percpu_help(struct seq_file *m)
2920{
fa6f0cc7 2921 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 2922#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
2923 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2924 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 2925#else
d79ac28f
RV
2926 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2927 "# Must use main snapshot file to allocate.\n");
f1affcaa 2928#endif
d79ac28f
RV
2929 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2930 "# (Doesn't have to be '2' works with any number that\n"
2931 "# is not a '0' or '1')\n");
f1affcaa
SRRH
2932}
2933
d8741e2e
SRRH
2934static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2935{
45ad21ca 2936 if (iter->tr->allocated_snapshot)
fa6f0cc7 2937 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 2938 else
fa6f0cc7 2939 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 2940
fa6f0cc7 2941 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
2942 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2943 show_snapshot_main_help(m);
2944 else
2945 show_snapshot_percpu_help(m);
d8741e2e
SRRH
2946}
2947#else
2948/* Should never be called */
2949static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2950#endif
2951
bc0c38d1
SR
2952static int s_show(struct seq_file *m, void *v)
2953{
2954 struct trace_iterator *iter = v;
a63ce5b3 2955 int ret;
bc0c38d1
SR
2956
2957 if (iter->ent == NULL) {
2958 if (iter->tr) {
2959 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2960 seq_puts(m, "#\n");
e0a413f6 2961 test_ftrace_alive(m);
bc0c38d1 2962 }
d8741e2e
SRRH
2963 if (iter->snapshot && trace_empty(iter))
2964 print_snapshot_help(m, iter);
2965 else if (iter->trace && iter->trace->print_header)
8bba1bf5 2966 iter->trace->print_header(m);
62b915f1
JO
2967 else
2968 trace_default_header(m);
2969
a63ce5b3
SR
2970 } else if (iter->leftover) {
2971 /*
2972 * If we filled the seq_file buffer earlier, we
2973 * want to just show it now.
2974 */
2975 ret = trace_print_seq(m, &iter->seq);
2976
2977 /* ret should this time be zero, but you never know */
2978 iter->leftover = ret;
2979
bc0c38d1 2980 } else {
f9896bf3 2981 print_trace_line(iter);
a63ce5b3
SR
2982 ret = trace_print_seq(m, &iter->seq);
2983 /*
2984 * If we overflow the seq_file buffer, then it will
2985 * ask us for this data again at start up.
2986 * Use that instead.
2987 * ret is 0 if seq_file write succeeded.
2988 * -1 otherwise.
2989 */
2990 iter->leftover = ret;
bc0c38d1
SR
2991 }
2992
2993 return 0;
2994}
2995
649e9c70
ON
2996/*
2997 * Should be used after trace_array_get(); trace_types_lock
2998 * ensures that i_cdev was already initialized.
2999 */
3000static inline int tracing_get_cpu(struct inode *inode)
3001{
3002 if (inode->i_cdev) /* See trace_create_cpu_file() */
3003 return (long)inode->i_cdev - 1;
3004 return RING_BUFFER_ALL_CPUS;
3005}
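
/*
 * Encoding sketch (illustration only; the real encode side lives in
 * trace_create_cpu_file(), later in this file): per-cpu files are
 * expected to store "cpu + 1" in i_cdev so that CPU 0 stays
 * distinguishable from a NULL i_cdev, which tracing_get_cpu() maps to
 * RING_BUFFER_ALL_CPUS:
 *
 *	inode->i_cdev = (void *)(cpu + 1);	encode (illustrative)
 *	cpu = (long)inode->i_cdev - 1;		decode (above)
 */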
3006
88e9d34c 3007static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3008 .start = s_start,
3009 .next = s_next,
3010 .stop = s_stop,
3011 .show = s_show,
bc0c38d1
SR
3012};
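
/*
 * For reference, the seq_file core drives these roughly as
 * s_start() -> s_show() -> s_next() -> s_show() ... -> s_stop(),
 * restarting at the saved position when the user's read buffer fills up;
 * that restart is why s_show() stashes any overflow in iter->leftover.
 */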
3013
e309b41d 3014static struct trace_iterator *
6484c71c 3015__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3016{
6484c71c 3017 struct trace_array *tr = inode->i_private;
bc0c38d1 3018 struct trace_iterator *iter;
50e18b94 3019 int cpu;
bc0c38d1 3020
85a2f9b4
SR
3021 if (tracing_disabled)
3022 return ERR_PTR(-ENODEV);
60a11774 3023
50e18b94 3024 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3025 if (!iter)
3026 return ERR_PTR(-ENOMEM);
bc0c38d1 3027
72917235 3028 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 3029 GFP_KERNEL);
93574fcc
DC
3030 if (!iter->buffer_iter)
3031 goto release;
3032
d7350c3f
FW
3033 /*
3034 * We make a copy of the current tracer to avoid concurrent
3035 * changes on it while we are reading.
3036 */
bc0c38d1 3037 mutex_lock(&trace_types_lock);
d7350c3f 3038 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3039 if (!iter->trace)
d7350c3f 3040 goto fail;
85a2f9b4 3041
2b6080f2 3042 *iter->trace = *tr->current_trace;
d7350c3f 3043
79f55997 3044 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3045 goto fail;
3046
12883efb
SRRH
3047 iter->tr = tr;
3048
3049#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3050 /* Currently only the top directory has a snapshot */
3051 if (tr->current_trace->print_max || snapshot)
12883efb 3052 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3053 else
12883efb
SRRH
3054#endif
3055 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3056 iter->snapshot = snapshot;
bc0c38d1 3057 iter->pos = -1;
6484c71c 3058 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3059 mutex_init(&iter->mutex);
bc0c38d1 3060
8bba1bf5
MM
3061 /* Notify the tracer early; before we stop tracing. */
3062 if (iter->trace && iter->trace->open)
a93751ca 3063 iter->trace->open(iter);
8bba1bf5 3064
12ef7d44 3065 /* Annotate start of buffers if we had overruns */
12883efb 3066 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3067 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3068
8be0709f 3069 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3070 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3071 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3072
debdd57f
HT
3073 /* stop the trace while dumping if we are not opening "snapshot" */
3074 if (!iter->snapshot)
2b6080f2 3075 tracing_stop_tr(tr);
2f26ebd5 3076
ae3b5093 3077 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3078 for_each_tracing_cpu(cpu) {
b04cc6b1 3079 iter->buffer_iter[cpu] =
12883efb 3080 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3081 }
3082 ring_buffer_read_prepare_sync();
3083 for_each_tracing_cpu(cpu) {
3084 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3085 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3086 }
3087 } else {
3088 cpu = iter->cpu_file;
3928a8a2 3089 iter->buffer_iter[cpu] =
12883efb 3090 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3091 ring_buffer_read_prepare_sync();
3092 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3093 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3094 }
3095
bc0c38d1
SR
3096 mutex_unlock(&trace_types_lock);
3097
bc0c38d1 3098 return iter;
3928a8a2 3099
d7350c3f 3100 fail:
3928a8a2 3101 mutex_unlock(&trace_types_lock);
d7350c3f 3102 kfree(iter->trace);
6d158a81 3103 kfree(iter->buffer_iter);
93574fcc 3104release:
50e18b94
JO
3105 seq_release_private(inode, file);
3106 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3107}
3108
3109int tracing_open_generic(struct inode *inode, struct file *filp)
3110{
60a11774
SR
3111 if (tracing_disabled)
3112 return -ENODEV;
3113
bc0c38d1
SR
3114 filp->private_data = inode->i_private;
3115 return 0;
3116}
3117
2e86421d
GB
3118bool tracing_is_disabled(void)
3119{
3120	return (tracing_disabled) ? true : false;
3121}
3122
7b85af63
SRRH
3123/*
3124 * Open and update trace_array ref count.
3125 * Must have the current trace_array passed to it.
3126 */
dcc30223 3127static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3128{
3129 struct trace_array *tr = inode->i_private;
3130
3131 if (tracing_disabled)
3132 return -ENODEV;
3133
3134 if (trace_array_get(tr) < 0)
3135 return -ENODEV;
3136
3137 filp->private_data = inode->i_private;
3138
3139 return 0;
7b85af63
SRRH
3140}
3141
4fd27358 3142static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3143{
6484c71c 3144 struct trace_array *tr = inode->i_private;
907f2784 3145 struct seq_file *m = file->private_data;
4acd4d00 3146 struct trace_iterator *iter;
3928a8a2 3147 int cpu;
bc0c38d1 3148
ff451961 3149 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3150 trace_array_put(tr);
4acd4d00 3151 return 0;
ff451961 3152 }
4acd4d00 3153
6484c71c 3154 /* Writes do not use seq_file */
4acd4d00 3155 iter = m->private;
bc0c38d1 3156 mutex_lock(&trace_types_lock);
a695cb58 3157
3928a8a2
SR
3158 for_each_tracing_cpu(cpu) {
3159 if (iter->buffer_iter[cpu])
3160 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3161 }
3162
bc0c38d1
SR
3163 if (iter->trace && iter->trace->close)
3164 iter->trace->close(iter);
3165
debdd57f
HT
3166 if (!iter->snapshot)
3167 /* reenable tracing if it was previously enabled */
2b6080f2 3168 tracing_start_tr(tr);
f77d09a3
AL
3169
3170 __trace_array_put(tr);
3171
bc0c38d1
SR
3172 mutex_unlock(&trace_types_lock);
3173
d7350c3f 3174 mutex_destroy(&iter->mutex);
b0dfa978 3175 free_cpumask_var(iter->started);
d7350c3f 3176 kfree(iter->trace);
6d158a81 3177 kfree(iter->buffer_iter);
50e18b94 3178 seq_release_private(inode, file);
ff451961 3179
bc0c38d1
SR
3180 return 0;
3181}
3182
7b85af63
SRRH
3183static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3184{
3185 struct trace_array *tr = inode->i_private;
3186
3187 trace_array_put(tr);
bc0c38d1
SR
3188 return 0;
3189}
3190
7b85af63
SRRH
3191static int tracing_single_release_tr(struct inode *inode, struct file *file)
3192{
3193 struct trace_array *tr = inode->i_private;
3194
3195 trace_array_put(tr);
3196
3197 return single_release(inode, file);
3198}
3199
bc0c38d1
SR
3200static int tracing_open(struct inode *inode, struct file *file)
3201{
6484c71c 3202 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
3203 struct trace_iterator *iter;
3204 int ret = 0;
bc0c38d1 3205
ff451961
SRRH
3206 if (trace_array_get(tr) < 0)
3207 return -ENODEV;
3208
4acd4d00 3209 /* If this file was open for write, then erase contents */
6484c71c
ON
3210 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3211 int cpu = tracing_get_cpu(inode);
3212
3213 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3214 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 3215 else
6484c71c 3216 tracing_reset(&tr->trace_buffer, cpu);
4acd4d00 3217 }
bc0c38d1 3218
4acd4d00 3219 if (file->f_mode & FMODE_READ) {
6484c71c 3220 iter = __tracing_open(inode, file, false);
4acd4d00
SR
3221 if (IS_ERR(iter))
3222 ret = PTR_ERR(iter);
3223 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3224 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3225 }
ff451961
SRRH
3226
3227 if (ret < 0)
3228 trace_array_put(tr);
3229
bc0c38d1
SR
3230 return ret;
3231}
3232
607e2ea1
SRRH
3233/*
3234 * Some tracers are not suitable for instance buffers.
3235 * A tracer is always available for the global array (toplevel)
3236 * or if it explicitly states that it is.
3237 */
3238static bool
3239trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3240{
3241 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3242}
3243
3244/* Find the next tracer that this trace array may use */
3245static struct tracer *
3246get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3247{
3248 while (t && !trace_ok_for_array(t, tr))
3249 t = t->next;
3250
3251 return t;
3252}
3253
e309b41d 3254static void *
bc0c38d1
SR
3255t_next(struct seq_file *m, void *v, loff_t *pos)
3256{
607e2ea1 3257 struct trace_array *tr = m->private;
f129e965 3258 struct tracer *t = v;
bc0c38d1
SR
3259
3260 (*pos)++;
3261
3262 if (t)
607e2ea1 3263 t = get_tracer_for_array(tr, t->next);
bc0c38d1 3264
bc0c38d1
SR
3265 return t;
3266}
3267
3268static void *t_start(struct seq_file *m, loff_t *pos)
3269{
607e2ea1 3270 struct trace_array *tr = m->private;
f129e965 3271 struct tracer *t;
bc0c38d1
SR
3272 loff_t l = 0;
3273
3274 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
3275
3276 t = get_tracer_for_array(tr, trace_types);
3277 for (; t && l < *pos; t = t_next(m, t, &l))
3278 ;
bc0c38d1
SR
3279
3280 return t;
3281}
3282
3283static void t_stop(struct seq_file *m, void *p)
3284{
3285 mutex_unlock(&trace_types_lock);
3286}
3287
3288static int t_show(struct seq_file *m, void *v)
3289{
3290 struct tracer *t = v;
3291
3292 if (!t)
3293 return 0;
3294
fa6f0cc7 3295 seq_puts(m, t->name);
bc0c38d1
SR
3296 if (t->next)
3297 seq_putc(m, ' ');
3298 else
3299 seq_putc(m, '\n');
3300
3301 return 0;
3302}
3303
88e9d34c 3304static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
3305 .start = t_start,
3306 .next = t_next,
3307 .stop = t_stop,
3308 .show = t_show,
bc0c38d1
SR
3309};
3310
3311static int show_traces_open(struct inode *inode, struct file *file)
3312{
607e2ea1
SRRH
3313 struct trace_array *tr = inode->i_private;
3314 struct seq_file *m;
3315 int ret;
3316
60a11774
SR
3317 if (tracing_disabled)
3318 return -ENODEV;
3319
607e2ea1
SRRH
3320 ret = seq_open(file, &show_traces_seq_ops);
3321 if (ret)
3322 return ret;
3323
3324 m = file->private_data;
3325 m->private = tr;
3326
3327 return 0;
bc0c38d1
SR
3328}
3329
4acd4d00
SR
3330static ssize_t
3331tracing_write_stub(struct file *filp, const char __user *ubuf,
3332 size_t count, loff_t *ppos)
3333{
3334 return count;
3335}
3336
098c879e 3337loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 3338{
098c879e
SRRH
3339 int ret;
3340
364829b1 3341 if (file->f_mode & FMODE_READ)
098c879e 3342 ret = seq_lseek(file, offset, whence);
364829b1 3343 else
098c879e
SRRH
3344 file->f_pos = ret = 0;
3345
3346 return ret;
364829b1
SP
3347}
3348
5e2336a0 3349static const struct file_operations tracing_fops = {
4bf39a94
IM
3350 .open = tracing_open,
3351 .read = seq_read,
4acd4d00 3352 .write = tracing_write_stub,
098c879e 3353 .llseek = tracing_lseek,
4bf39a94 3354 .release = tracing_release,
bc0c38d1
SR
3355};
3356
5e2336a0 3357static const struct file_operations show_traces_fops = {
c7078de1
IM
3358 .open = show_traces_open,
3359 .read = seq_read,
3360 .release = seq_release,
b444786f 3361 .llseek = seq_lseek,
c7078de1
IM
3362};
3363
36dfe925
IM
3364/*
3365 * The tracer itself will not take this lock, but still we want
3366 * to provide a consistent cpumask to user-space:
3367 */
3368static DEFINE_MUTEX(tracing_cpumask_update_lock);
3369
3370/*
3371 * Temporary storage for the character representation of the
3372 * CPU bitmask (and one more byte for the newline):
3373 */
3374static char mask_str[NR_CPUS + 1];
3375
c7078de1
IM
3376static ssize_t
3377tracing_cpumask_read(struct file *filp, char __user *ubuf,
3378 size_t count, loff_t *ppos)
3379{
ccfe9e42 3380 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 3381 int len;
c7078de1
IM
3382
3383 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 3384
1a40243b
TH
3385 len = snprintf(mask_str, count, "%*pb\n",
3386 cpumask_pr_args(tr->tracing_cpumask));
3387 if (len >= count) {
36dfe925
IM
3388 count = -EINVAL;
3389 goto out_err;
3390 }
36dfe925
IM
3391 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3392
3393out_err:
c7078de1
IM
3394 mutex_unlock(&tracing_cpumask_update_lock);
3395
3396 return count;
3397}
3398
3399static ssize_t
3400tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3401 size_t count, loff_t *ppos)
3402{
ccfe9e42 3403 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 3404 cpumask_var_t tracing_cpumask_new;
2b6080f2 3405 int err, cpu;
9e01c1b7
RR
3406
3407 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3408 return -ENOMEM;
c7078de1 3409
9e01c1b7 3410 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 3411 if (err)
36dfe925
IM
3412 goto err_unlock;
3413
215368e8
LZ
3414 mutex_lock(&tracing_cpumask_update_lock);
3415
a5e25883 3416 local_irq_disable();
0b9b12c1 3417 arch_spin_lock(&tr->max_lock);
ab46428c 3418 for_each_tracing_cpu(cpu) {
36dfe925
IM
3419 /*
3420 * Increase/decrease the disabled counter if we are
3421 * about to flip a bit in the cpumask:
3422 */
ccfe9e42 3423 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3424 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3425 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3426 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 3427 }
ccfe9e42 3428 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3429 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3430 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3431 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
3432 }
3433 }
0b9b12c1 3434 arch_spin_unlock(&tr->max_lock);
a5e25883 3435 local_irq_enable();
36dfe925 3436
ccfe9e42 3437 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
3438
3439 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 3440 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
3441
3442 return count;
36dfe925
IM
3443
3444err_unlock:
215368e8 3445 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
3446
3447 return err;
c7078de1
IM
3448}
3449
5e2336a0 3450static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 3451 .open = tracing_open_generic_tr,
c7078de1
IM
3452 .read = tracing_cpumask_read,
3453 .write = tracing_cpumask_write,
ccfe9e42 3454 .release = tracing_release_generic_tr,
b444786f 3455 .llseek = generic_file_llseek,
bc0c38d1
SR
3456};
3457
fdb372ed 3458static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 3459{
d8e83d26 3460 struct tracer_opt *trace_opts;
2b6080f2 3461 struct trace_array *tr = m->private;
d8e83d26 3462 u32 tracer_flags;
d8e83d26 3463 int i;
adf9f195 3464
d8e83d26 3465 mutex_lock(&trace_types_lock);
2b6080f2
SR
3466 tracer_flags = tr->current_trace->flags->val;
3467 trace_opts = tr->current_trace->flags->opts;
d8e83d26 3468
bc0c38d1
SR
3469 for (i = 0; trace_options[i]; i++) {
3470 if (trace_flags & (1 << i))
fdb372ed 3471 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 3472 else
fdb372ed 3473 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
3474 }
3475
adf9f195
FW
3476 for (i = 0; trace_opts[i].name; i++) {
3477 if (tracer_flags & trace_opts[i].bit)
fdb372ed 3478 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 3479 else
fdb372ed 3480 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 3481 }
d8e83d26 3482 mutex_unlock(&trace_types_lock);
adf9f195 3483
fdb372ed 3484 return 0;
bc0c38d1 3485}
bc0c38d1 3486
8c1a49ae 3487static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
3488 struct tracer_flags *tracer_flags,
3489 struct tracer_opt *opts, int neg)
3490{
8c1a49ae 3491 struct tracer *trace = tr->current_trace;
8d18eaaf 3492 int ret;
bc0c38d1 3493
8c1a49ae 3494 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
3495 if (ret)
3496 return ret;
3497
3498 if (neg)
3499 tracer_flags->val &= ~opts->bit;
3500 else
3501 tracer_flags->val |= opts->bit;
3502 return 0;
bc0c38d1
SR
3503}
3504
adf9f195 3505/* Try to assign a tracer specific option */
8c1a49ae 3506static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 3507{
8c1a49ae 3508 struct tracer *trace = tr->current_trace;
7770841e 3509 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 3510 struct tracer_opt *opts = NULL;
8d18eaaf 3511 int i;
adf9f195 3512
7770841e
Z
3513 for (i = 0; tracer_flags->opts[i].name; i++) {
3514 opts = &tracer_flags->opts[i];
adf9f195 3515
8d18eaaf 3516 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 3517 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 3518 }
adf9f195 3519
8d18eaaf 3520 return -EINVAL;
adf9f195
FW
3521}
3522
613f04a0
SRRH
3523/* Some tracers require overwrite to stay enabled */
3524int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3525{
3526 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3527 return -1;
3528
3529 return 0;
3530}
3531
2b6080f2 3532int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
3533{
3534 /* do nothing if flag is already set */
3535 if (!!(trace_flags & mask) == !!enabled)
613f04a0
SRRH
3536 return 0;
3537
3538 /* Give the tracer a chance to approve the change */
2b6080f2 3539 if (tr->current_trace->flag_changed)
bf6065b5 3540 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 3541 return -EINVAL;
af4617bd
SR
3542
3543 if (enabled)
3544 trace_flags |= mask;
3545 else
3546 trace_flags &= ~mask;
e870e9a1
LZ
3547
3548 if (mask == TRACE_ITER_RECORD_CMD)
3549 trace_event_enable_cmd_record(enabled);
750912fa 3550
80902822 3551 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 3552 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 3553#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 3554 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
3555#endif
3556 }
81698831 3557
b9f9108c 3558 if (mask == TRACE_ITER_PRINTK) {
81698831 3559 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
3560 trace_printk_control(enabled);
3561 }
613f04a0
SRRH
3562
3563 return 0;
af4617bd
SR
3564}
3565
2b6080f2 3566static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 3567{
8d18eaaf 3568 char *cmp;
bc0c38d1 3569 int neg = 0;
613f04a0 3570 int ret = -ENODEV;
bc0c38d1
SR
3571 int i;
3572
7bcfaf54 3573 cmp = strstrip(option);
bc0c38d1 3574
8d18eaaf 3575 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
3576 neg = 1;
3577 cmp += 2;
3578 }
3579
69d34da2
SRRH
3580 mutex_lock(&trace_types_lock);
3581
bc0c38d1 3582 for (i = 0; trace_options[i]; i++) {
8d18eaaf 3583 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 3584 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
3585 break;
3586 }
3587 }
adf9f195
FW
3588
3589 /* If no option could be set, test the specific tracer options */
69d34da2 3590 if (!trace_options[i])
8c1a49ae 3591 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
3592
3593 mutex_unlock(&trace_types_lock);
bc0c38d1 3594
7bcfaf54
SR
3595 return ret;
3596}
3597
3598static ssize_t
3599tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3600 size_t cnt, loff_t *ppos)
3601{
2b6080f2
SR
3602 struct seq_file *m = filp->private_data;
3603 struct trace_array *tr = m->private;
7bcfaf54 3604 char buf[64];
613f04a0 3605 int ret;
7bcfaf54
SR
3606
3607 if (cnt >= sizeof(buf))
3608 return -EINVAL;
3609
3610 if (copy_from_user(&buf, ubuf, cnt))
3611 return -EFAULT;
3612
a8dd2176
SR
3613 buf[cnt] = 0;
3614
2b6080f2 3615 ret = trace_set_options(tr, buf);
613f04a0
SRRH
3616 if (ret < 0)
3617 return ret;
7bcfaf54 3618
cf8517cf 3619 *ppos += cnt;
bc0c38d1
SR
3620
3621 return cnt;
3622}
3623
fdb372ed
LZ
3624static int tracing_trace_options_open(struct inode *inode, struct file *file)
3625{
7b85af63 3626 struct trace_array *tr = inode->i_private;
f77d09a3 3627 int ret;
7b85af63 3628
fdb372ed
LZ
3629 if (tracing_disabled)
3630 return -ENODEV;
2b6080f2 3631
7b85af63
SRRH
3632 if (trace_array_get(tr) < 0)
3633 return -ENODEV;
3634
f77d09a3
AL
3635 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3636 if (ret < 0)
3637 trace_array_put(tr);
3638
3639 return ret;
fdb372ed
LZ
3640}
3641
5e2336a0 3642static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
3643 .open = tracing_trace_options_open,
3644 .read = seq_read,
3645 .llseek = seq_lseek,
7b85af63 3646 .release = tracing_single_release_tr,
ee6bce52 3647 .write = tracing_trace_options_write,
bc0c38d1
SR
3648};
3649
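
Illustrative userspace sketch (not part of trace.c): toggling a flag through the trace_options file handled by the code above. The tracefs mount point and the option name "overwrite" are assumptions; any name listed in trace_options can be set the same way, and a "no" prefix clears it, mirroring trace_set_options().

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Assumption: tracefs is mounted here (it may also live under /sys/kernel/debug/tracing). */
#define TRACEFS "/sys/kernel/tracing"

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}

int main(void)
{
	/* A "no" prefix clears the flag, the bare name sets it (see trace_set_options()). */
	write_str(TRACEFS "/trace_options", "nooverwrite");
	write_str(TRACEFS "/trace_options", "overwrite");
	return 0;
}
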
7bd2f24c
IM
3650static const char readme_msg[] =
3651 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
3652 "# echo 0 > tracing_on : quick way to disable tracing\n"
3653 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3654 " Important files:\n"
3655 " trace\t\t\t- The static contents of the buffer\n"
3656 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3657 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3658 " current_tracer\t- function and latency tracers\n"
3659 " available_tracers\t- list of configured tracers for current_tracer\n"
3660 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3661 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3662 " trace_clock\t\t- change the clock used to order events\n"
3663 " local: Per cpu clock but may not be synced across CPUs\n"
3664 " global: Synced across CPUs but slows tracing down.\n"
3665 " counter: Not a clock, but just an increment\n"
3666 " uptime: Jiffy counter from time of boot\n"
3667 " perf: Same clock that perf events use\n"
3668#ifdef CONFIG_X86_64
3669 " x86-tsc: TSC cycle counter\n"
3670#endif
3671 "\n trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
3672 " tracing_cpumask\t- Limit which CPUs to trace\n"
3673 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3674 "\t\t\t Remove sub-buffer with rmdir\n"
3675 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
3676 "\t\t\t Disable an option by adding the prefix 'no' to the\n"
3677 "\t\t\t option name\n"
939c7a4f 3678 " saved_cmdlines_size\t- echo the number of comm-pid pairs to store in here\n"
22f45649
SRRH
3679#ifdef CONFIG_DYNAMIC_FTRACE
3680 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
3681 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3682 "\t\t\t functions\n"
3683 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3684 "\t modules: Can select a group via module\n"
3685 "\t Format: :mod:<module-name>\n"
3686 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3687 "\t triggers: a command to perform when function is hit\n"
3688 "\t Format: <function>:<trigger>[:count]\n"
3689 "\t trigger: traceon, traceoff\n"
3690 "\t\t enable_event:<system>:<event>\n"
3691 "\t\t disable_event:<system>:<event>\n"
22f45649 3692#ifdef CONFIG_STACKTRACE
71485c45 3693 "\t\t stacktrace\n"
22f45649
SRRH
3694#endif
3695#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3696 "\t\t snapshot\n"
22f45649 3697#endif
17a280ea
SRRH
3698 "\t\t dump\n"
3699 "\t\t cpudump\n"
71485c45
SRRH
3700 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3701 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3702 "\t The first one will disable tracing every time do_fault is hit\n"
3703 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3704 "\t The first time do_trap is hit and it disables tracing, the\n"
3705 "\t counter will decrement to 2. If tracing is already disabled,\n"
3706 "\t the counter will not decrement. It only decrements when the\n"
3707 "\t trigger did work\n"
3708 "\t To remove trigger without count:\n"
3709 "\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
3710 "\t To remove trigger with a count:\n"
3711 "\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
22f45649 3712 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
3713 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3714 "\t modules: Can select a group via module command :mod:\n"
3715 "\t Does not accept triggers\n"
22f45649
SRRH
3716#endif /* CONFIG_DYNAMIC_FTRACE */
3717#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
3718 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3719 "\t\t (function)\n"
22f45649
SRRH
3720#endif
3721#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3722 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 3723 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
3724 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3725#endif
3726#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
3727 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3728 "\t\t\t snapshot buffer. Read the contents for more\n"
3729 "\t\t\t information\n"
22f45649 3730#endif
991821c8 3731#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
3732 " stack_trace\t\t- Shows the max stack trace when active\n"
3733 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
3734 "\t\t\t Write into this file to reset the max size (trigger a\n"
3735 "\t\t\t new trace)\n"
22f45649 3736#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
3737 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3738 "\t\t\t traces\n"
22f45649 3739#endif
991821c8 3740#endif /* CONFIG_STACK_TRACER */
26f25564
TZ
3741 " events/\t\t- Directory containing all trace event subsystems:\n"
3742 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3743 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
3744 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3745 "\t\t\t events\n"
26f25564 3746 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
3747 " events/<system>/<event>/\t- Directory containing control files for\n"
3748 "\t\t\t <event>:\n"
26f25564
TZ
3749 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3750 " filter\t\t- If set, only events passing filter are traced\n"
3751 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
3752 "\t Format: <trigger>[:count][if <filter>]\n"
3753 "\t trigger: traceon, traceoff\n"
3754 "\t enable_event:<system>:<event>\n"
3755 "\t disable_event:<system>:<event>\n"
26f25564 3756#ifdef CONFIG_STACKTRACE
71485c45 3757 "\t\t stacktrace\n"
26f25564
TZ
3758#endif
3759#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3760 "\t\t snapshot\n"
26f25564 3761#endif
71485c45
SRRH
3762 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3763 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3764 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3765 "\t events/block/block_unplug/trigger\n"
3766 "\t The first disables tracing every time block_unplug is hit.\n"
3767 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3768 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3769 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3770 "\t Like function triggers, the counter is only decremented if the\n"
3771 "\t trigger actually enabled or disabled tracing.\n"
3772 "\t To remove a trigger without a count:\n"
3773 "\t echo '!<trigger>' > <system>/<event>/trigger\n"
3774 "\t To remove a trigger with a count:\n"
3775 "\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
3776 "\t Filters can be ignored when removing a trigger.\n"
7bd2f24c
IM
3777;
3778
3779static ssize_t
3780tracing_readme_read(struct file *filp, char __user *ubuf,
3781 size_t cnt, loff_t *ppos)
3782{
3783 return simple_read_from_buffer(ubuf, cnt, ppos,
3784 readme_msg, strlen(readme_msg));
3785}
3786
5e2336a0 3787static const struct file_operations tracing_readme_fops = {
c7078de1
IM
3788 .open = tracing_open_generic,
3789 .read = tracing_readme_read,
b444786f 3790 .llseek = generic_file_llseek,
7bd2f24c
IM
3791};
3792
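
A minimal userspace sketch of the basic workflow the readme text above describes: enable tracing via tracing_on and do a non-consuming read of trace. The file names come from the readme; the tracefs mount point is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/tracing"	/* assumed mount point */

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Equivalent of "echo 1 > tracing_on". */
	fd = open(TRACEFS "/tracing_on", O_WRONLY);
	if (fd < 0) {
		perror("tracing_on");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);

	sleep(1);	/* let some events accumulate */

	/* Non-consuming read: "trace" shows the static contents of the buffer. */
	fd = open(TRACEFS "/trace", O_RDONLY);
	if (fd < 0) {
		perror("trace");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		write(STDOUT_FILENO, buf, n);
	close(fd);
	return 0;
}
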
42584c81
YY
3793static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3794{
3795 unsigned int *ptr = v;
69abe6a5 3796
42584c81
YY
3797 if (*pos || m->count)
3798 ptr++;
69abe6a5 3799
42584c81 3800 (*pos)++;
69abe6a5 3801
939c7a4f
YY
3802 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3803 ptr++) {
42584c81
YY
3804 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3805 continue;
69abe6a5 3806
42584c81
YY
3807 return ptr;
3808 }
69abe6a5 3809
42584c81
YY
3810 return NULL;
3811}
3812
3813static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3814{
3815 void *v;
3816 loff_t l = 0;
69abe6a5 3817
4c27e756
SRRH
3818 preempt_disable();
3819 arch_spin_lock(&trace_cmdline_lock);
3820
939c7a4f 3821 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
3822 while (l <= *pos) {
3823 v = saved_cmdlines_next(m, v, &l);
3824 if (!v)
3825 return NULL;
69abe6a5
AP
3826 }
3827
42584c81
YY
3828 return v;
3829}
3830
3831static void saved_cmdlines_stop(struct seq_file *m, void *v)
3832{
4c27e756
SRRH
3833 arch_spin_unlock(&trace_cmdline_lock);
3834 preempt_enable();
42584c81 3835}
69abe6a5 3836
42584c81
YY
3837static int saved_cmdlines_show(struct seq_file *m, void *v)
3838{
3839 char buf[TASK_COMM_LEN];
3840 unsigned int *pid = v;
69abe6a5 3841
4c27e756 3842 __trace_find_cmdline(*pid, buf);
42584c81
YY
3843 seq_printf(m, "%d %s\n", *pid, buf);
3844 return 0;
3845}
3846
3847static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3848 .start = saved_cmdlines_start,
3849 .next = saved_cmdlines_next,
3850 .stop = saved_cmdlines_stop,
3851 .show = saved_cmdlines_show,
3852};
3853
3854static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3855{
3856 if (tracing_disabled)
3857 return -ENODEV;
3858
3859 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
3860}
3861
3862static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
3863 .open = tracing_saved_cmdlines_open,
3864 .read = seq_read,
3865 .llseek = seq_lseek,
3866 .release = seq_release,
69abe6a5
AP
3867};
3868
939c7a4f
YY
3869static ssize_t
3870tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3871 size_t cnt, loff_t *ppos)
3872{
3873 char buf[64];
3874 int r;
3875
3876 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 3877 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
3878 arch_spin_unlock(&trace_cmdline_lock);
3879
3880 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3881}
3882
3883static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3884{
3885 kfree(s->saved_cmdlines);
3886 kfree(s->map_cmdline_to_pid);
3887 kfree(s);
3888}
3889
3890static int tracing_resize_saved_cmdlines(unsigned int val)
3891{
3892 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3893
a6af8fbf 3894 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
3895 if (!s)
3896 return -ENOMEM;
3897
3898 if (allocate_cmdlines_buffer(val, s) < 0) {
3899 kfree(s);
3900 return -ENOMEM;
3901 }
3902
3903 arch_spin_lock(&trace_cmdline_lock);
3904 savedcmd_temp = savedcmd;
3905 savedcmd = s;
3906 arch_spin_unlock(&trace_cmdline_lock);
3907 free_saved_cmdlines_buffer(savedcmd_temp);
3908
3909 return 0;
3910}
3911
3912static ssize_t
3913tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3914 size_t cnt, loff_t *ppos)
3915{
3916 unsigned long val;
3917 int ret;
3918
3919 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3920 if (ret)
3921 return ret;
3922
3923 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3924 if (!val || val > PID_MAX_DEFAULT)
3925 return -EINVAL;
3926
3927 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3928 if (ret < 0)
3929 return ret;
3930
3931 *ppos += cnt;
3932
3933 return cnt;
3934}
3935
3936static const struct file_operations tracing_saved_cmdlines_size_fops = {
3937 .open = tracing_open_generic,
3938 .read = tracing_saved_cmdlines_size_read,
3939 .write = tracing_saved_cmdlines_size_write,
3940};
3941
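
Hedged usage sketch for the saved_cmdlines files implemented above: grow the comm cache through saved_cmdlines_size, then dump the pid/comm pairs. The mount point and the value 1024 are illustrative assumptions; the kernel rejects 0 and anything above PID_MAX_DEFAULT.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/tracing"	/* assumed mount point */

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Resize the comm cache (see tracing_resize_saved_cmdlines()). */
	fd = open(TRACEFS "/saved_cmdlines_size", O_WRONLY);
	if (fd >= 0) {
		if (write(fd, "1024", 4) < 0)
			perror("saved_cmdlines_size");
		close(fd);
	}

	/* Each line printed by saved_cmdlines_show() is "<pid> <comm>". */
	fd = open(TRACEFS "/saved_cmdlines", O_RDONLY);
	if (fd < 0) {
		perror("saved_cmdlines");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		write(STDOUT_FILENO, buf, n);
	close(fd);
	return 0;
}
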
9828413d
SRRH
3942#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3943static union trace_enum_map_item *
3944update_enum_map(union trace_enum_map_item *ptr)
3945{
3946 if (!ptr->map.enum_string) {
3947 if (ptr->tail.next) {
3948 ptr = ptr->tail.next;
3949 /* Set ptr to the next real item (skip head) */
3950 ptr++;
3951 } else
3952 return NULL;
3953 }
3954 return ptr;
3955}
3956
3957static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
3958{
3959 union trace_enum_map_item *ptr = v;
3960
3961 /*
3962 * Paranoid! If ptr points to end, we don't want to increment past it.
3963 * This really should never happen.
3964 */
3965 ptr = update_enum_map(ptr);
3966 if (WARN_ON_ONCE(!ptr))
3967 return NULL;
3968
3969 ptr++;
3970
3971 (*pos)++;
3972
3973 ptr = update_enum_map(ptr);
3974
3975 return ptr;
3976}
3977
3978static void *enum_map_start(struct seq_file *m, loff_t *pos)
3979{
3980 union trace_enum_map_item *v;
3981 loff_t l = 0;
3982
3983 mutex_lock(&trace_enum_mutex);
3984
3985 v = trace_enum_maps;
3986 if (v)
3987 v++;
3988
3989 while (v && l < *pos) {
3990 v = enum_map_next(m, v, &l);
3991 }
3992
3993 return v;
3994}
3995
3996static void enum_map_stop(struct seq_file *m, void *v)
3997{
3998 mutex_unlock(&trace_enum_mutex);
3999}
4000
4001static int enum_map_show(struct seq_file *m, void *v)
4002{
4003 union trace_enum_map_item *ptr = v;
4004
4005 seq_printf(m, "%s %ld (%s)\n",
4006 ptr->map.enum_string, ptr->map.enum_value,
4007 ptr->map.system);
4008
4009 return 0;
4010}
4011
4012static const struct seq_operations tracing_enum_map_seq_ops = {
4013 .start = enum_map_start,
4014 .next = enum_map_next,
4015 .stop = enum_map_stop,
4016 .show = enum_map_show,
4017};
4018
4019static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4020{
4021 if (tracing_disabled)
4022 return -ENODEV;
4023
4024 return seq_open(filp, &tracing_enum_map_seq_ops);
4025}
4026
4027static const struct file_operations tracing_enum_map_fops = {
4028 .open = tracing_enum_map_open,
4029 .read = seq_read,
4030 .llseek = seq_lseek,
4031 .release = seq_release,
4032};
4033
4034static inline union trace_enum_map_item *
4035trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4036{
4037 /* Return tail of array given the head */
4038 return ptr + ptr->head.length + 1;
4039}
4040
4041static void
4042trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4043 int len)
4044{
4045 struct trace_enum_map **stop;
4046 struct trace_enum_map **map;
4047 union trace_enum_map_item *map_array;
4048 union trace_enum_map_item *ptr;
4049
4050 stop = start + len;
4051
4052 /*
4053 * The trace_enum_maps contains the map plus a head and tail item,
4054 * where the head holds the module and length of array, and the
4055 * tail holds a pointer to the next list.
4056 */
4057 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4058 if (!map_array) {
4059 pr_warning("Unable to allocate trace enum mapping\n");
4060 return;
4061 }
4062
4063 mutex_lock(&trace_enum_mutex);
4064
4065 if (!trace_enum_maps)
4066 trace_enum_maps = map_array;
4067 else {
4068 ptr = trace_enum_maps;
4069 for (;;) {
4070 ptr = trace_enum_jmp_to_tail(ptr);
4071 if (!ptr->tail.next)
4072 break;
4073 ptr = ptr->tail.next;
4074
4075 }
4076 ptr->tail.next = map_array;
4077 }
4078 map_array->head.mod = mod;
4079 map_array->head.length = len;
4080 map_array++;
4081
4082 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4083 map_array->map = **map;
4084 map_array++;
4085 }
4086 memset(map_array, 0, sizeof(*map_array));
4087
4088 mutex_unlock(&trace_enum_mutex);
4089}
4090
4091static void trace_create_enum_file(struct dentry *d_tracer)
4092{
4093 trace_create_file("enum_map", 0444, d_tracer,
4094 NULL, &tracing_enum_map_fops);
4095}
4096
4097#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4098static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4099static inline void trace_insert_enum_map_file(struct module *mod,
4100 struct trace_enum_map **start, int len) { }
4101#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4102
4103static void trace_insert_enum_map(struct module *mod,
4104 struct trace_enum_map **start, int len)
0c564a53
SRRH
4105{
4106 struct trace_enum_map **map;
0c564a53
SRRH
4107
4108 if (len <= 0)
4109 return;
4110
4111 map = start;
4112
4113 trace_event_enum_update(map, len);
9828413d
SRRH
4114
4115 trace_insert_enum_map_file(mod, start, len);
0c564a53
SRRH
4116}
4117
bc0c38d1
SR
4118static ssize_t
4119tracing_set_trace_read(struct file *filp, char __user *ubuf,
4120 size_t cnt, loff_t *ppos)
4121{
2b6080f2 4122 struct trace_array *tr = filp->private_data;
ee6c2c1b 4123 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
4124 int r;
4125
4126 mutex_lock(&trace_types_lock);
2b6080f2 4127 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
4128 mutex_unlock(&trace_types_lock);
4129
4bf39a94 4130 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4131}
4132
b6f11df2
ACM
4133int tracer_init(struct tracer *t, struct trace_array *tr)
4134{
12883efb 4135 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
4136 return t->init(tr);
4137}
4138
12883efb 4139static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
4140{
4141 int cpu;
737223fb 4142
438ced17 4143 for_each_tracing_cpu(cpu)
12883efb 4144 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
4145}
4146
12883efb 4147#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 4148/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
4149static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4150 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
4151{
4152 int cpu, ret = 0;
4153
4154 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4155 for_each_tracing_cpu(cpu) {
12883efb
SRRH
4156 ret = ring_buffer_resize(trace_buf->buffer,
4157 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
4158 if (ret < 0)
4159 break;
12883efb
SRRH
4160 per_cpu_ptr(trace_buf->data, cpu)->entries =
4161 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
4162 }
4163 } else {
12883efb
SRRH
4164 ret = ring_buffer_resize(trace_buf->buffer,
4165 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 4166 if (ret == 0)
12883efb
SRRH
4167 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4168 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
4169 }
4170
4171 return ret;
4172}
12883efb 4173#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 4174
2b6080f2
SR
4175static int __tracing_resize_ring_buffer(struct trace_array *tr,
4176 unsigned long size, int cpu)
73c5162a
SR
4177{
4178 int ret;
4179
4180 /*
4181 * If kernel or user changes the size of the ring buffer
a123c52b
SR
4182 * we use the size that was given, and we can forget about
4183 * expanding it later.
73c5162a 4184 */
55034cd6 4185 ring_buffer_expanded = true;
73c5162a 4186
b382ede6 4187 /* May be called before buffers are initialized */
12883efb 4188 if (!tr->trace_buffer.buffer)
b382ede6
SR
4189 return 0;
4190
12883efb 4191 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
4192 if (ret < 0)
4193 return ret;
4194
12883efb 4195#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
4196 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4197 !tr->current_trace->use_max_tr)
ef710e10
KM
4198 goto out;
4199
12883efb 4200 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 4201 if (ret < 0) {
12883efb
SRRH
4202 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4203 &tr->trace_buffer, cpu);
73c5162a 4204 if (r < 0) {
a123c52b
SR
4205 /*
4206 * AARGH! We are left with different
4207 * size max buffer!!!!
4208 * The max buffer is our "snapshot" buffer.
4209 * When a tracer needs a snapshot (one of the
4210 * latency tracers), it swaps the max buffer
4211 * with the saved snapshot. We succeeded in
4212 * updating the size of the main buffer, but failed to
4213 * update the size of the max buffer. But when we tried
4214 * to reset the main buffer to the original size, we
4215 * failed there too. This is very unlikely to
4216 * happen, but if it does, warn and kill all
4217 * tracing.
4218 */
73c5162a
SR
4219 WARN_ON(1);
4220 tracing_disabled = 1;
4221 }
4222 return ret;
4223 }
4224
438ced17 4225 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4226 set_buffer_entries(&tr->max_buffer, size);
438ced17 4227 else
12883efb 4228 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 4229
ef710e10 4230 out:
12883efb
SRRH
4231#endif /* CONFIG_TRACER_MAX_TRACE */
4232
438ced17 4233 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4234 set_buffer_entries(&tr->trace_buffer, size);
438ced17 4235 else
12883efb 4236 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
4237
4238 return ret;
4239}
4240
2b6080f2
SR
4241static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4242 unsigned long size, int cpu_id)
4f271a2a 4243{
83f40318 4244 int ret = size;
4f271a2a
VN
4245
4246 mutex_lock(&trace_types_lock);
4247
438ced17
VN
4248 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4249 /* make sure, this cpu is enabled in the mask */
4250 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4251 ret = -EINVAL;
4252 goto out;
4253 }
4254 }
4f271a2a 4255
2b6080f2 4256 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
4257 if (ret < 0)
4258 ret = -ENOMEM;
4259
438ced17 4260out:
4f271a2a
VN
4261 mutex_unlock(&trace_types_lock);
4262
4263 return ret;
4264}
4265
ef710e10 4266
1852fcce
SR
4267/**
4268 * tracing_update_buffers - used by tracing facility to expand ring buffers
4269 *
4270 * To save memory when tracing is never used on a system that has it
4271 * configured in, the ring buffers are set to a minimum size. But once
4272 * a user starts to use the tracing facility, the buffers need to grow
4273 * to their default size.
4274 *
4275 * This function is to be called when a tracer is about to be used.
4276 */
4277int tracing_update_buffers(void)
4278{
4279 int ret = 0;
4280
1027fcb2 4281 mutex_lock(&trace_types_lock);
1852fcce 4282 if (!ring_buffer_expanded)
2b6080f2 4283 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 4284 RING_BUFFER_ALL_CPUS);
1027fcb2 4285 mutex_unlock(&trace_types_lock);
1852fcce
SR
4286
4287 return ret;
4288}
4289
577b785f
SR
4290struct trace_option_dentry;
4291
4292static struct trace_option_dentry *
2b6080f2 4293create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 4294
6b450d25
SRRH
4295/*
4296 * Used to clear out the tracer before deletion of an instance.
4297 * Must have trace_types_lock held.
4298 */
4299static void tracing_set_nop(struct trace_array *tr)
4300{
4301 if (tr->current_trace == &nop_trace)
4302 return;
4303
50512ab5 4304 tr->current_trace->enabled--;
6b450d25
SRRH
4305
4306 if (tr->current_trace->reset)
4307 tr->current_trace->reset(tr);
4308
4309 tr->current_trace = &nop_trace;
4310}
4311
41d9c0be 4312static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 4313{
09d23a1d
SRRH
4314 /* Only enable if the directory has been created already. */
4315 if (!tr->dir)
4316 return;
4317
4318 /* Currently, only the top instance has options */
4319 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
4320 return;
4321
41d9c0be
SRRH
4322 /* Ignore if they were already created */
4323 if (t->topts)
4324 return;
4325
4326 t->topts = create_trace_option_files(tr, t);
09d23a1d
SRRH
4327}
4328
4329static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4330{
bc0c38d1 4331 struct tracer *t;
12883efb 4332#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4333 bool had_max_tr;
12883efb 4334#endif
d9e54076 4335 int ret = 0;
bc0c38d1 4336
1027fcb2
SR
4337 mutex_lock(&trace_types_lock);
4338
73c5162a 4339 if (!ring_buffer_expanded) {
2b6080f2 4340 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 4341 RING_BUFFER_ALL_CPUS);
73c5162a 4342 if (ret < 0)
59f586db 4343 goto out;
73c5162a
SR
4344 ret = 0;
4345 }
4346
bc0c38d1
SR
4347 for (t = trace_types; t; t = t->next) {
4348 if (strcmp(t->name, buf) == 0)
4349 break;
4350 }
c2931e05
FW
4351 if (!t) {
4352 ret = -EINVAL;
4353 goto out;
4354 }
2b6080f2 4355 if (t == tr->current_trace)
bc0c38d1
SR
4356 goto out;
4357
607e2ea1
SRRH
4358 /* Some tracers are only allowed for the top level buffer */
4359 if (!trace_ok_for_array(t, tr)) {
4360 ret = -EINVAL;
4361 goto out;
4362 }
4363
cf6ab6d9
SRRH
4364 /* If trace pipe files are being read, we can't change the tracer */
4365 if (tr->current_trace->ref) {
4366 ret = -EBUSY;
4367 goto out;
4368 }
4369
9f029e83 4370 trace_branch_disable();
613f04a0 4371
50512ab5 4372 tr->current_trace->enabled--;
613f04a0 4373
2b6080f2
SR
4374 if (tr->current_trace->reset)
4375 tr->current_trace->reset(tr);
34600f0e 4376
12883efb 4377 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 4378 tr->current_trace = &nop_trace;
34600f0e 4379
45ad21ca
SRRH
4380#ifdef CONFIG_TRACER_MAX_TRACE
4381 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
4382
4383 if (had_max_tr && !t->use_max_tr) {
4384 /*
4385 * We need to make sure that the update_max_tr sees that
4386 * current_trace changed to nop_trace to keep it from
4387 * swapping the buffers after we resize it.
4388 * The update_max_tr is called from interrupts disabled
4389 * so a synchronized_sched() is sufficient.
4390 */
4391 synchronize_sched();
3209cff4 4392 free_snapshot(tr);
ef710e10 4393 }
12883efb 4394#endif
12883efb
SRRH
4395
4396#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4397 if (t->use_max_tr && !had_max_tr) {
3209cff4 4398 ret = alloc_snapshot(tr);
d60da506
HT
4399 if (ret < 0)
4400 goto out;
ef710e10 4401 }
12883efb 4402#endif
577b785f 4403
1c80025a 4404 if (t->init) {
b6f11df2 4405 ret = tracer_init(t, tr);
1c80025a
FW
4406 if (ret)
4407 goto out;
4408 }
bc0c38d1 4409
2b6080f2 4410 tr->current_trace = t;
50512ab5 4411 tr->current_trace->enabled++;
9f029e83 4412 trace_branch_enable(tr);
bc0c38d1
SR
4413 out:
4414 mutex_unlock(&trace_types_lock);
4415
d9e54076
PZ
4416 return ret;
4417}
4418
4419static ssize_t
4420tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4421 size_t cnt, loff_t *ppos)
4422{
607e2ea1 4423 struct trace_array *tr = filp->private_data;
ee6c2c1b 4424 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
4425 int i;
4426 size_t ret;
e6e7a65a
FW
4427 int err;
4428
4429 ret = cnt;
d9e54076 4430
ee6c2c1b
LZ
4431 if (cnt > MAX_TRACER_SIZE)
4432 cnt = MAX_TRACER_SIZE;
d9e54076
PZ
4433
4434 if (copy_from_user(&buf, ubuf, cnt))
4435 return -EFAULT;
4436
4437 buf[cnt] = 0;
4438
4439 /* strip ending whitespace. */
4440 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4441 buf[i] = 0;
4442
607e2ea1 4443 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
4444 if (err)
4445 return err;
d9e54076 4446
cf8517cf 4447 *ppos += ret;
bc0c38d1 4448
c2931e05 4449 return ret;
bc0c38d1
SR
4450}
4451
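
Illustrative sketch of driving tracing_set_tracer() from userspace by writing a tracer name to current_tracer. The "nop" tracer is referenced by the code above; the mount point is an assumption, and available_tracers lists what else may be selected on a given kernel.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/tracing"	/* assumed mount point */

static int set_tracer(const char *name)
{
	int fd = open(TRACEFS "/current_tracer", O_WRONLY);

	if (fd < 0) {
		perror("current_tracer");
		return -1;
	}
	/* The write path strips trailing whitespace, so "nop\n" and "nop" both work. */
	if (write(fd, name, strlen(name)) < 0)
		perror("write");
	close(fd);
	return 0;
}

int main(void)
{
	char cur[64];
	ssize_t n;
	int fd;

	set_tracer("nop");	/* "nop" always exists; see available_tracers for others */

	fd = open(TRACEFS "/current_tracer", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, cur, sizeof(cur) - 1);
	if (n > 0) {
		cur[n] = '\0';
		printf("current_tracer: %s", cur);
	}
	close(fd);
	return 0;
}
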
4452static ssize_t
6508fa76
SF
4453tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4454 size_t cnt, loff_t *ppos)
bc0c38d1 4455{
bc0c38d1
SR
4456 char buf[64];
4457 int r;
4458
cffae437 4459 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 4460 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
4461 if (r > sizeof(buf))
4462 r = sizeof(buf);
4bf39a94 4463 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4464}
4465
4466static ssize_t
6508fa76
SF
4467tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4468 size_t cnt, loff_t *ppos)
bc0c38d1 4469{
5e39841c 4470 unsigned long val;
c6caeeb1 4471 int ret;
bc0c38d1 4472
22fe9b54
PH
4473 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4474 if (ret)
c6caeeb1 4475 return ret;
bc0c38d1
SR
4476
4477 *ptr = val * 1000;
4478
4479 return cnt;
4480}
4481
6508fa76
SF
4482static ssize_t
4483tracing_thresh_read(struct file *filp, char __user *ubuf,
4484 size_t cnt, loff_t *ppos)
4485{
4486 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4487}
4488
4489static ssize_t
4490tracing_thresh_write(struct file *filp, const char __user *ubuf,
4491 size_t cnt, loff_t *ppos)
4492{
4493 struct trace_array *tr = filp->private_data;
4494 int ret;
4495
4496 mutex_lock(&trace_types_lock);
4497 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4498 if (ret < 0)
4499 goto out;
4500
4501 if (tr->current_trace->update_thresh) {
4502 ret = tr->current_trace->update_thresh(tr);
4503 if (ret < 0)
4504 goto out;
4505 }
4506
4507 ret = cnt;
4508out:
4509 mutex_unlock(&trace_types_lock);
4510
4511 return ret;
4512}
4513
4514static ssize_t
4515tracing_max_lat_read(struct file *filp, char __user *ubuf,
4516 size_t cnt, loff_t *ppos)
4517{
4518 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4519}
4520
4521static ssize_t
4522tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4523 size_t cnt, loff_t *ppos)
4524{
4525 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4526}
4527
b3806b43
SR
4528static int tracing_open_pipe(struct inode *inode, struct file *filp)
4529{
15544209 4530 struct trace_array *tr = inode->i_private;
b3806b43 4531 struct trace_iterator *iter;
b04cc6b1 4532 int ret = 0;
b3806b43
SR
4533
4534 if (tracing_disabled)
4535 return -ENODEV;
4536
7b85af63
SRRH
4537 if (trace_array_get(tr) < 0)
4538 return -ENODEV;
4539
b04cc6b1
FW
4540 mutex_lock(&trace_types_lock);
4541
b3806b43
SR
4542 /* create a buffer to store the information to pass to userspace */
4543 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
4544 if (!iter) {
4545 ret = -ENOMEM;
f77d09a3 4546 __trace_array_put(tr);
b04cc6b1
FW
4547 goto out;
4548 }
b3806b43 4549
3a161d99 4550 trace_seq_init(&iter->seq);
d716ff71 4551 iter->trace = tr->current_trace;
d7350c3f 4552
4462344e 4553 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 4554 ret = -ENOMEM;
d7350c3f 4555 goto fail;
4462344e
RR
4556 }
4557
a309720c 4558 /* trace pipe does not show start of buffer */
4462344e 4559 cpumask_setall(iter->started);
a309720c 4560
112f38a7
SR
4561 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4562 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4563
8be0709f 4564 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 4565 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
4566 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4567
15544209
ON
4568 iter->tr = tr;
4569 iter->trace_buffer = &tr->trace_buffer;
4570 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 4571 mutex_init(&iter->mutex);
b3806b43
SR
4572 filp->private_data = iter;
4573
107bad8b
SR
4574 if (iter->trace->pipe_open)
4575 iter->trace->pipe_open(iter);
107bad8b 4576
b444786f 4577 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
4578
4579 tr->current_trace->ref++;
b04cc6b1
FW
4580out:
4581 mutex_unlock(&trace_types_lock);
4582 return ret;
d7350c3f
FW
4583
4584fail:
4585 /* iter->trace points at tr->current_trace and was not allocated here */
4586 kfree(iter);
7b85af63 4587 __trace_array_put(tr);
d7350c3f
FW
4588 mutex_unlock(&trace_types_lock);
4589 return ret;
b3806b43
SR
4590}
4591
4592static int tracing_release_pipe(struct inode *inode, struct file *file)
4593{
4594 struct trace_iterator *iter = file->private_data;
15544209 4595 struct trace_array *tr = inode->i_private;
b3806b43 4596
b04cc6b1
FW
4597 mutex_lock(&trace_types_lock);
4598
cf6ab6d9
SRRH
4599 tr->current_trace->ref--;
4600
29bf4a5e 4601 if (iter->trace->pipe_close)
c521efd1
SR
4602 iter->trace->pipe_close(iter);
4603
b04cc6b1
FW
4604 mutex_unlock(&trace_types_lock);
4605
4462344e 4606 free_cpumask_var(iter->started);
d7350c3f 4607 mutex_destroy(&iter->mutex);
b3806b43 4608 kfree(iter);
b3806b43 4609
7b85af63
SRRH
4610 trace_array_put(tr);
4611
b3806b43
SR
4612 return 0;
4613}
4614
2a2cc8f7 4615static unsigned int
cc60cdc9 4616trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 4617{
15693458
SRRH
4618 /* Iterators are static, they should be filled or empty */
4619 if (trace_buffer_iter(iter, iter->cpu_file))
4620 return POLLIN | POLLRDNORM;
2a2cc8f7 4621
15693458 4622 if (trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
4623 /*
4624 * Always select as readable when in blocking mode
4625 */
4626 return POLLIN | POLLRDNORM;
15693458 4627 else
12883efb 4628 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 4629 filp, poll_table);
2a2cc8f7 4630}
2a2cc8f7 4631
cc60cdc9
SR
4632static unsigned int
4633tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4634{
4635 struct trace_iterator *iter = filp->private_data;
4636
4637 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
4638}
4639
d716ff71 4640/* Must be called with iter->mutex held. */
ff98781b 4641static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
4642{
4643 struct trace_iterator *iter = filp->private_data;
8b8b3683 4644 int ret;
b3806b43 4645
b3806b43 4646 while (trace_empty(iter)) {
2dc8f095 4647
107bad8b 4648 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 4649 return -EAGAIN;
107bad8b 4650 }
2dc8f095 4651
b3806b43 4652 /*
250bfd3d 4653 * We block until we read something and tracing is disabled.
b3806b43
SR
4654 * We still block if tracing is disabled, but we have never
4655 * read anything. This allows a user to cat this file, and
4656 * then enable tracing. But after we have read something,
4657 * we give an EOF when tracing is again disabled.
4658 *
4659 * iter->pos will be 0 if we haven't read anything.
4660 */
10246fa3 4661 if (!tracing_is_on() && iter->pos)
b3806b43 4662 break;
f4874261
SRRH
4663
4664 mutex_unlock(&iter->mutex);
4665
e30f53aa 4666 ret = wait_on_pipe(iter, false);
f4874261
SRRH
4667
4668 mutex_lock(&iter->mutex);
4669
8b8b3683
SRRH
4670 if (ret)
4671 return ret;
b3806b43
SR
4672 }
4673
ff98781b
EGM
4674 return 1;
4675}
4676
4677/*
4678 * Consumer reader.
4679 */
4680static ssize_t
4681tracing_read_pipe(struct file *filp, char __user *ubuf,
4682 size_t cnt, loff_t *ppos)
4683{
4684 struct trace_iterator *iter = filp->private_data;
4685 ssize_t sret;
4686
4687 /* return any leftover data */
4688 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4689 if (sret != -EBUSY)
4690 return sret;
4691
f9520750 4692 trace_seq_init(&iter->seq);
ff98781b 4693
d7350c3f
FW
4694 /*
4695 * Avoid more than one consumer on a single file descriptor.
4696 * This is just a matter of trace coherency; the ring buffer itself
4697 * is protected.
4698 */
4699 mutex_lock(&iter->mutex);
ff98781b
EGM
4700 if (iter->trace->read) {
4701 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4702 if (sret)
4703 goto out;
4704 }
4705
4706waitagain:
4707 sret = tracing_wait_pipe(filp);
4708 if (sret <= 0)
4709 goto out;
4710
b3806b43 4711 /* stop when tracing is finished */
ff98781b
EGM
4712 if (trace_empty(iter)) {
4713 sret = 0;
107bad8b 4714 goto out;
ff98781b 4715 }
b3806b43
SR
4716
4717 if (cnt >= PAGE_SIZE)
4718 cnt = PAGE_SIZE - 1;
4719
53d0aa77 4720 /* reset all but tr, trace, and overruns */
53d0aa77
SR
4721 memset(&iter->seq, 0,
4722 sizeof(struct trace_iterator) -
4723 offsetof(struct trace_iterator, seq));
ed5467da 4724 cpumask_clear(iter->started);
4823ed7e 4725 iter->pos = -1;
b3806b43 4726
4f535968 4727 trace_event_read_lock();
7e53bd42 4728 trace_access_lock(iter->cpu_file);
955b61e5 4729 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 4730 enum print_line_t ret;
5ac48378 4731 int save_len = iter->seq.seq.len;
088b1e42 4732
f9896bf3 4733 ret = print_trace_line(iter);
2c4f035f 4734 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 4735 /* don't print partial lines */
5ac48378 4736 iter->seq.seq.len = save_len;
b3806b43 4737 break;
088b1e42 4738 }
b91facc3
FW
4739 if (ret != TRACE_TYPE_NO_CONSUME)
4740 trace_consume(iter);
b3806b43 4741
5ac48378 4742 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 4743 break;
ee5e51f5
JO
4744
4745 /*
4746 * Setting the full flag means we reached the trace_seq buffer
4747 * size and we should leave by partial output condition above.
4748 * One of the trace_seq_* functions is not used properly.
4749 */
4750 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4751 iter->ent->type);
b3806b43 4752 }
7e53bd42 4753 trace_access_unlock(iter->cpu_file);
4f535968 4754 trace_event_read_unlock();
b3806b43 4755
b3806b43 4756 /* Now copy what we have to the user */
6c6c2796 4757 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 4758 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 4759 trace_seq_init(&iter->seq);
9ff4b974
PP
4760
4761 /*
25985edc 4762 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
4763 * entries, go back to wait for more entries.
4764 */
6c6c2796 4765 if (sret == -EBUSY)
9ff4b974 4766 goto waitagain;
b3806b43 4767
107bad8b 4768out:
d7350c3f 4769 mutex_unlock(&iter->mutex);
107bad8b 4770
6c6c2796 4771 return sret;
b3806b43
SR
4772}
4773
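
A small consuming reader for trace_pipe, the file served by tracing_read_pipe() above; reads block until data arrives unless O_NONBLOCK is set. The mount point is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/tracing"	/* assumed mount point */

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open(TRACEFS "/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("trace_pipe");
		return 1;
	}
	/* Each read() consumes entries from the ring buffer. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		write(STDOUT_FILENO, buf, n);
	close(fd);
	return 0;
}
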
3c56819b
EGM
4774static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4775 unsigned int idx)
4776{
4777 __free_page(spd->pages[idx]);
4778}
4779
28dfef8f 4780static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 4781 .can_merge = 0,
34cd4998 4782 .confirm = generic_pipe_buf_confirm,
92fdd98c 4783 .release = generic_pipe_buf_release,
34cd4998
SR
4784 .steal = generic_pipe_buf_steal,
4785 .get = generic_pipe_buf_get,
3c56819b
EGM
4786};
4787
34cd4998 4788static size_t
fa7c7f6e 4789tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
4790{
4791 size_t count;
74f06bb7 4792 int save_len;
34cd4998
SR
4793 int ret;
4794
4795 /* Seq buffer is page-sized, exactly what we need. */
4796 for (;;) {
74f06bb7 4797 save_len = iter->seq.seq.len;
34cd4998 4798 ret = print_trace_line(iter);
74f06bb7
SRRH
4799
4800 if (trace_seq_has_overflowed(&iter->seq)) {
4801 iter->seq.seq.len = save_len;
34cd4998
SR
4802 break;
4803 }
74f06bb7
SRRH
4804
4805 /*
4806 * This should not be hit, because it should only
4807 * be set if the iter->seq overflowed. But check it
4808 * anyway to be safe.
4809 */
34cd4998 4810 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
4811 iter->seq.seq.len = save_len;
4812 break;
4813 }
4814
5ac48378 4815 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
4816 if (rem < count) {
4817 rem = 0;
4818 iter->seq.seq.len = save_len;
34cd4998
SR
4819 break;
4820 }
4821
74e7ff8c
LJ
4822 if (ret != TRACE_TYPE_NO_CONSUME)
4823 trace_consume(iter);
34cd4998 4824 rem -= count;
955b61e5 4825 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
4826 rem = 0;
4827 iter->ent = NULL;
4828 break;
4829 }
4830 }
4831
4832 return rem;
4833}
4834
3c56819b
EGM
4835static ssize_t tracing_splice_read_pipe(struct file *filp,
4836 loff_t *ppos,
4837 struct pipe_inode_info *pipe,
4838 size_t len,
4839 unsigned int flags)
4840{
35f3d14d
JA
4841 struct page *pages_def[PIPE_DEF_BUFFERS];
4842 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
4843 struct trace_iterator *iter = filp->private_data;
4844 struct splice_pipe_desc spd = {
35f3d14d
JA
4845 .pages = pages_def,
4846 .partial = partial_def,
34cd4998 4847 .nr_pages = 0, /* This gets updated below. */
047fe360 4848 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
4849 .flags = flags,
4850 .ops = &tracing_pipe_buf_ops,
4851 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
4852 };
4853 ssize_t ret;
34cd4998 4854 size_t rem;
3c56819b
EGM
4855 unsigned int i;
4856
35f3d14d
JA
4857 if (splice_grow_spd(pipe, &spd))
4858 return -ENOMEM;
4859
d7350c3f 4860 mutex_lock(&iter->mutex);
3c56819b
EGM
4861
4862 if (iter->trace->splice_read) {
4863 ret = iter->trace->splice_read(iter, filp,
4864 ppos, pipe, len, flags);
4865 if (ret)
34cd4998 4866 goto out_err;
3c56819b
EGM
4867 }
4868
4869 ret = tracing_wait_pipe(filp);
4870 if (ret <= 0)
34cd4998 4871 goto out_err;
3c56819b 4872
955b61e5 4873 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 4874 ret = -EFAULT;
34cd4998 4875 goto out_err;
3c56819b
EGM
4876 }
4877
4f535968 4878 trace_event_read_lock();
7e53bd42 4879 trace_access_lock(iter->cpu_file);
4f535968 4880
3c56819b 4881 /* Fill as many pages as possible. */
a786c06d 4882 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
4883 spd.pages[i] = alloc_page(GFP_KERNEL);
4884 if (!spd.pages[i])
34cd4998 4885 break;
3c56819b 4886
fa7c7f6e 4887 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
4888
4889 /* Copy the data into the page, so we can start over. */
4890 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 4891 page_address(spd.pages[i]),
5ac48378 4892 trace_seq_used(&iter->seq));
3c56819b 4893 if (ret < 0) {
35f3d14d 4894 __free_page(spd.pages[i]);
3c56819b
EGM
4895 break;
4896 }
35f3d14d 4897 spd.partial[i].offset = 0;
5ac48378 4898 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 4899
f9520750 4900 trace_seq_init(&iter->seq);
3c56819b
EGM
4901 }
4902
7e53bd42 4903 trace_access_unlock(iter->cpu_file);
4f535968 4904 trace_event_read_unlock();
d7350c3f 4905 mutex_unlock(&iter->mutex);
3c56819b
EGM
4906
4907 spd.nr_pages = i;
4908
35f3d14d
JA
4909 ret = splice_to_pipe(pipe, &spd);
4910out:
047fe360 4911 splice_shrink_spd(&spd);
35f3d14d 4912 return ret;
3c56819b 4913
34cd4998 4914out_err:
d7350c3f 4915 mutex_unlock(&iter->mutex);
35f3d14d 4916 goto out;
3c56819b
EGM
4917}
4918
a98a3c3f
SR
4919static ssize_t
4920tracing_entries_read(struct file *filp, char __user *ubuf,
4921 size_t cnt, loff_t *ppos)
4922{
0bc392ee
ON
4923 struct inode *inode = file_inode(filp);
4924 struct trace_array *tr = inode->i_private;
4925 int cpu = tracing_get_cpu(inode);
438ced17
VN
4926 char buf[64];
4927 int r = 0;
4928 ssize_t ret;
a98a3c3f 4929
db526ca3 4930 mutex_lock(&trace_types_lock);
438ced17 4931
0bc392ee 4932 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
4933 int cpu, buf_size_same;
4934 unsigned long size;
4935
4936 size = 0;
4937 buf_size_same = 1;
4938 /* check if all cpu sizes are same */
4939 for_each_tracing_cpu(cpu) {
4940 /* fill in the size from first enabled cpu */
4941 if (size == 0)
12883efb
SRRH
4942 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4943 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
4944 buf_size_same = 0;
4945 break;
4946 }
4947 }
4948
4949 if (buf_size_same) {
4950 if (!ring_buffer_expanded)
4951 r = sprintf(buf, "%lu (expanded: %lu)\n",
4952 size >> 10,
4953 trace_buf_size >> 10);
4954 else
4955 r = sprintf(buf, "%lu\n", size >> 10);
4956 } else
4957 r = sprintf(buf, "X\n");
4958 } else
0bc392ee 4959 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 4960
db526ca3
SR
4961 mutex_unlock(&trace_types_lock);
4962
438ced17
VN
4963 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4964 return ret;
a98a3c3f
SR
4965}
4966
4967static ssize_t
4968tracing_entries_write(struct file *filp, const char __user *ubuf,
4969 size_t cnt, loff_t *ppos)
4970{
0bc392ee
ON
4971 struct inode *inode = file_inode(filp);
4972 struct trace_array *tr = inode->i_private;
a98a3c3f 4973 unsigned long val;
4f271a2a 4974 int ret;
a98a3c3f 4975
22fe9b54
PH
4976 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4977 if (ret)
c6caeeb1 4978 return ret;
a98a3c3f
SR
4979
4980 /* must have at least 1 entry */
4981 if (!val)
4982 return -EINVAL;
4983
1696b2b0
SR
4984 /* value is in KB */
4985 val <<= 10;
0bc392ee 4986 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
4987 if (ret < 0)
4988 return ret;
a98a3c3f 4989
cf8517cf 4990 *ppos += cnt;
a98a3c3f 4991
4f271a2a
VN
4992 return cnt;
4993}
bf5e6519 4994
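
Usage sketch for the buffer_size_kb and buffer_total_size_kb files backed by the handlers above; the value written is interpreted in KB, as the `val <<= 10` conversion shows. The mount point and the 4096 KB figure are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/tracing"	/* assumed mount point */

int main(void)
{
	const char *kb = "4096";	/* per-CPU size in KB; illustrative value */
	char buf[64];
	ssize_t n;
	int fd;

	fd = open(TRACEFS "/buffer_size_kb", O_WRONLY);
	if (fd < 0) {
		perror("buffer_size_kb");
		return 1;
	}
	if (write(fd, kb, strlen(kb)) < 0)
		perror("write");
	close(fd);

	/* buffer_total_size_kb reports the sum over all CPUs. */
	fd = open(TRACEFS "/buffer_total_size_kb", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("total: %s", buf);
		}
		close(fd);
	}
	return 0;
}
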
f81ab074
VN
4995static ssize_t
4996tracing_total_entries_read(struct file *filp, char __user *ubuf,
4997 size_t cnt, loff_t *ppos)
4998{
4999 struct trace_array *tr = filp->private_data;
5000 char buf[64];
5001 int r, cpu;
5002 unsigned long size = 0, expanded_size = 0;
5003
5004 mutex_lock(&trace_types_lock);
5005 for_each_tracing_cpu(cpu) {
12883efb 5006 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
5007 if (!ring_buffer_expanded)
5008 expanded_size += trace_buf_size >> 10;
5009 }
5010 if (ring_buffer_expanded)
5011 r = sprintf(buf, "%lu\n", size);
5012 else
5013 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5014 mutex_unlock(&trace_types_lock);
5015
5016 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5017}
5018
4f271a2a
VN
5019static ssize_t
5020tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5021 size_t cnt, loff_t *ppos)
5022{
5023 /*
5024 * There is no need to read what the user has written; this function
5025 * is just to make sure that there is no error when "echo" is used
5026 */
5027
5028 *ppos += cnt;
a98a3c3f
SR
5029
5030 return cnt;
5031}
5032
4f271a2a
VN
5033static int
5034tracing_free_buffer_release(struct inode *inode, struct file *filp)
5035{
2b6080f2
SR
5036 struct trace_array *tr = inode->i_private;
5037
cf30cf67
SR
5038 /* disable tracing ? */
5039 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 5040 tracer_tracing_off(tr);
4f271a2a 5041 /* resize the ring buffer to 0 */
2b6080f2 5042 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 5043
7b85af63
SRRH
5044 trace_array_put(tr);
5045
4f271a2a
VN
5046 return 0;
5047}
5048
5bf9a1ee
PP
5049static ssize_t
5050tracing_mark_write(struct file *filp, const char __user *ubuf,
5051 size_t cnt, loff_t *fpos)
5052{
d696b58c 5053 unsigned long addr = (unsigned long)ubuf;
2d71619c 5054 struct trace_array *tr = filp->private_data;
d696b58c
SR
5055 struct ring_buffer_event *event;
5056 struct ring_buffer *buffer;
5057 struct print_entry *entry;
5058 unsigned long irq_flags;
5059 struct page *pages[2];
6edb2a8a 5060 void *map_page[2];
d696b58c
SR
5061 int nr_pages = 1;
5062 ssize_t written;
d696b58c
SR
5063 int offset;
5064 int size;
5065 int len;
5066 int ret;
6edb2a8a 5067 int i;
5bf9a1ee 5068
c76f0694 5069 if (tracing_disabled)
5bf9a1ee
PP
5070 return -EINVAL;
5071
5224c3a3
MSB
5072 if (!(trace_flags & TRACE_ITER_MARKERS))
5073 return -EINVAL;
5074
5bf9a1ee
PP
5075 if (cnt > TRACE_BUF_SIZE)
5076 cnt = TRACE_BUF_SIZE;
5077
d696b58c
SR
5078 /*
5079 * Userspace is injecting traces into the kernel trace buffer.
5080 * We want to be as non intrusive as possible.
5081 * To do so, we do not want to allocate any special buffers
5082 * or take any locks, but instead write the userspace data
5083 * straight into the ring buffer.
5084 *
5085 * First we need to pin the userspace buffer into memory,
5086 * which most likely it already is, because the process just referenced it.
5087 * But there's no guarantee that it is. By using get_user_pages_fast()
5088 * and kmap_atomic/kunmap_atomic() we can get access to the
5089 * pages directly. We then write the data directly into the
5090 * ring buffer.
5091 */
5092 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 5093
d696b58c
SR
5094 /* check if we cross pages */
5095 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5096 nr_pages = 2;
5097
5098 offset = addr & (PAGE_SIZE - 1);
5099 addr &= PAGE_MASK;
5100
5101 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5102 if (ret < nr_pages) {
5103 while (--ret >= 0)
5104 put_page(pages[ret]);
5105 written = -EFAULT;
5106 goto out;
5bf9a1ee 5107 }
d696b58c 5108
6edb2a8a
SR
5109 for (i = 0; i < nr_pages; i++)
5110 map_page[i] = kmap_atomic(pages[i]);
d696b58c
SR
5111
5112 local_save_flags(irq_flags);
5113 size = sizeof(*entry) + cnt + 2; /* possible \n added */
2d71619c 5114 buffer = tr->trace_buffer.buffer;
d696b58c
SR
5115 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5116 irq_flags, preempt_count());
5117 if (!event) {
5118 /* Ring buffer disabled, return as if not open for write */
5119 written = -EBADF;
5120 goto out_unlock;
5bf9a1ee 5121 }
d696b58c
SR
5122
5123 entry = ring_buffer_event_data(event);
5124 entry->ip = _THIS_IP_;
5125
5126 if (nr_pages == 2) {
5127 len = PAGE_SIZE - offset;
6edb2a8a
SR
5128 memcpy(&entry->buf, map_page[0] + offset, len);
5129 memcpy(&entry->buf[len], map_page[1], cnt - len);
c13d2f7c 5130 } else
6edb2a8a 5131 memcpy(&entry->buf, map_page[0] + offset, cnt);
5bf9a1ee 5132
d696b58c
SR
5133 if (entry->buf[cnt - 1] != '\n') {
5134 entry->buf[cnt] = '\n';
5135 entry->buf[cnt + 1] = '\0';
5136 } else
5137 entry->buf[cnt] = '\0';
5138
7ffbd48d 5139 __buffer_unlock_commit(buffer, event);
5bf9a1ee 5140
d696b58c 5141 written = cnt;
5bf9a1ee 5142
d696b58c 5143 *fpos += written;
1aa54bca 5144
d696b58c 5145 out_unlock:
7215853e 5146 for (i = nr_pages - 1; i >= 0; i--) {
6edb2a8a
SR
5147 kunmap_atomic(map_page[i]);
5148 put_page(pages[i]);
5149 }
d696b58c 5150 out:
1aa54bca 5151 return written;
5bf9a1ee
PP
5152}
5153
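
Userspace counterpart to tracing_mark_write() above: one write to trace_marker becomes one TRACE_PRINT entry in the buffer. The mount point is assumed and the message text is arbitrary.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/tracing"	/* assumed mount point */

int main(void)
{
	const char *msg = "hello from userspace";	/* arbitrary marker text */
	int fd;

	fd = open(TRACEFS "/trace_marker", O_WRONLY);
	if (fd < 0) {
		perror("trace_marker");
		return 1;
	}
	/* One write becomes one entry; a '\n' is appended by the kernel if missing. */
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");
	close(fd);
	return 0;
}
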
13f16d20 5154static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 5155{
2b6080f2 5156 struct trace_array *tr = m->private;
5079f326
Z
5157 int i;
5158
5159 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 5160 seq_printf(m,
5079f326 5161 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
5162 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5163 i == tr->clock_id ? "]" : "");
13f16d20 5164 seq_putc(m, '\n');
5079f326 5165
13f16d20 5166 return 0;
5079f326
Z
5167}
5168
e1e232ca 5169static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 5170{
5079f326
Z
5171 int i;
5172
5079f326
Z
5173 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5174 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5175 break;
5176 }
5177 if (i == ARRAY_SIZE(trace_clocks))
5178 return -EINVAL;
5179
5079f326
Z
5180 mutex_lock(&trace_types_lock);
5181
2b6080f2
SR
5182 tr->clock_id = i;
5183
12883efb 5184 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 5185
60303ed3
DS
5186 /*
5187 * New clock may not be consistent with the previous clock.
5188 * Reset the buffer so that it doesn't have incomparable timestamps.
5189 */
9457158b 5190 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
5191
5192#ifdef CONFIG_TRACER_MAX_TRACE
5193 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5194 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 5195 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 5196#endif
60303ed3 5197
5079f326
Z
5198 mutex_unlock(&trace_types_lock);
5199
e1e232ca
SR
5200 return 0;
5201}
5202
5203static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5204 size_t cnt, loff_t *fpos)
5205{
5206 struct seq_file *m = filp->private_data;
5207 struct trace_array *tr = m->private;
5208 char buf[64];
5209 const char *clockstr;
5210 int ret;
5211
5212 if (cnt >= sizeof(buf))
5213 return -EINVAL;
5214
5215 if (copy_from_user(&buf, ubuf, cnt))
5216 return -EFAULT;
5217
5218 buf[cnt] = 0;
5219
5220 clockstr = strstrip(buf);
5221
5222 ret = tracing_set_clock(tr, clockstr);
5223 if (ret)
5224 return ret;
5225
5079f326
Z
5226 *fpos += cnt;
5227
5228 return cnt;
5229}
5230
13f16d20
LZ
5231static int tracing_clock_open(struct inode *inode, struct file *file)
5232{
7b85af63
SRRH
5233 struct trace_array *tr = inode->i_private;
5234 int ret;
5235
13f16d20
LZ
5236 if (tracing_disabled)
5237 return -ENODEV;
2b6080f2 5238
7b85af63
SRRH
5239 if (trace_array_get(tr))
5240 return -ENODEV;
5241
5242 ret = single_open(file, tracing_clock_show, inode->i_private);
5243 if (ret < 0)
5244 trace_array_put(tr);
5245
5246 return ret;
13f16d20
LZ
5247}
5248
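
Sketch of selecting a trace clock through the file handled above; reading shows the current clock in [brackets], and "global" appears in the clock list documented earlier. The mount point is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/tracing"	/* assumed mount point */

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd;

	/* The clock currently in use is printed inside [brackets]. */
	fd = open(TRACEFS "/trace_clock", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			fputs(buf, stdout);
		}
		close(fd);
	}

	/* Switching clocks resets the buffers so timestamps stay comparable. */
	fd = open(TRACEFS "/trace_clock", O_WRONLY);
	if (fd < 0) {
		perror("trace_clock");
		return 1;
	}
	if (write(fd, "global", strlen("global")) < 0)
		perror("write");
	close(fd);
	return 0;
}
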
6de58e62
SRRH
5249struct ftrace_buffer_info {
5250 struct trace_iterator iter;
5251 void *spare;
5252 unsigned int read;
5253};
5254
debdd57f
HT
5255#ifdef CONFIG_TRACER_SNAPSHOT
5256static int tracing_snapshot_open(struct inode *inode, struct file *file)
5257{
6484c71c 5258 struct trace_array *tr = inode->i_private;
debdd57f 5259 struct trace_iterator *iter;
2b6080f2 5260 struct seq_file *m;
debdd57f
HT
5261 int ret = 0;
5262
ff451961
SRRH
5263 if (trace_array_get(tr) < 0)
5264 return -ENODEV;
5265
debdd57f 5266 if (file->f_mode & FMODE_READ) {
6484c71c 5267 iter = __tracing_open(inode, file, true);
debdd57f
HT
5268 if (IS_ERR(iter))
5269 ret = PTR_ERR(iter);
2b6080f2
SR
5270 } else {
5271 /* Writes still need the seq_file to hold the private data */
f77d09a3 5272 ret = -ENOMEM;
2b6080f2
SR
5273 m = kzalloc(sizeof(*m), GFP_KERNEL);
5274 if (!m)
f77d09a3 5275 goto out;
2b6080f2
SR
5276 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5277 if (!iter) {
5278 kfree(m);
f77d09a3 5279 goto out;
2b6080f2 5280 }
f77d09a3
AL
5281 ret = 0;
5282
ff451961 5283 iter->tr = tr;
6484c71c
ON
5284 iter->trace_buffer = &tr->max_buffer;
5285 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
5286 m->private = iter;
5287 file->private_data = m;
debdd57f 5288 }
f77d09a3 5289out:
ff451961
SRRH
5290 if (ret < 0)
5291 trace_array_put(tr);
5292
debdd57f
HT
5293 return ret;
5294}
5295
5296static ssize_t
5297tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5298 loff_t *ppos)
5299{
2b6080f2
SR
5300 struct seq_file *m = filp->private_data;
5301 struct trace_iterator *iter = m->private;
5302 struct trace_array *tr = iter->tr;
debdd57f
HT
5303 unsigned long val;
5304 int ret;
5305
5306 ret = tracing_update_buffers();
5307 if (ret < 0)
5308 return ret;
5309
5310 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5311 if (ret)
5312 return ret;
5313
5314 mutex_lock(&trace_types_lock);
5315
2b6080f2 5316 if (tr->current_trace->use_max_tr) {
debdd57f
HT
5317 ret = -EBUSY;
5318 goto out;
5319 }
5320
5321 switch (val) {
5322 case 0:
f1affcaa
SRRH
5323 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5324 ret = -EINVAL;
5325 break;
debdd57f 5326 }
3209cff4
SRRH
5327 if (tr->allocated_snapshot)
5328 free_snapshot(tr);
debdd57f
HT
5329 break;
5330 case 1:
f1affcaa
SRRH
5331/* Only allow per-cpu swap if the ring buffer supports it */
5332#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5333 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5334 ret = -EINVAL;
5335 break;
5336 }
5337#endif
45ad21ca 5338 if (!tr->allocated_snapshot) {
3209cff4 5339 ret = alloc_snapshot(tr);
debdd57f
HT
5340 if (ret < 0)
5341 break;
debdd57f 5342 }
debdd57f
HT
5343 local_irq_disable();
5344 /* Now, we're going to swap */
f1affcaa 5345 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 5346 update_max_tr(tr, current, smp_processor_id());
f1affcaa 5347 else
ce9bae55 5348 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
5349 local_irq_enable();
5350 break;
5351 default:
45ad21ca 5352 if (tr->allocated_snapshot) {
f1affcaa
SRRH
5353 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5354 tracing_reset_online_cpus(&tr->max_buffer);
5355 else
5356 tracing_reset(&tr->max_buffer, iter->cpu_file);
5357 }
debdd57f
HT
5358 break;
5359 }
5360
5361 if (ret >= 0) {
5362 *ppos += cnt;
5363 ret = cnt;
5364 }
5365out:
5366 mutex_unlock(&trace_types_lock);
5367 return ret;
5368}
2b6080f2
SR
5369
5370static int tracing_snapshot_release(struct inode *inode, struct file *file)
5371{
5372 struct seq_file *m = file->private_data;
ff451961
SRRH
5373 int ret;
5374
5375 ret = tracing_release(inode, file);
2b6080f2
SR
5376
5377 if (file->f_mode & FMODE_READ)
ff451961 5378 return ret;
2b6080f2
SR
5379
5380 /* If write only, the seq_file is just a stub */
5381 if (m)
5382 kfree(m->private);
5383 kfree(m);
5384
5385 return 0;
5386}
5387
6de58e62
SRRH
5388static int tracing_buffers_open(struct inode *inode, struct file *filp);
5389static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5390 size_t count, loff_t *ppos);
5391static int tracing_buffers_release(struct inode *inode, struct file *file);
5392static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5393 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5394
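/*
 * "snapshot_raw" reuses the raw buffer reader below, but points the
 * iterator at the snapshot (max) buffer and refuses tracers that use
 * the max buffer themselves.
 */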
5395static int snapshot_raw_open(struct inode *inode, struct file *filp)
5396{
5397 struct ftrace_buffer_info *info;
5398 int ret;
5399
5400 ret = tracing_buffers_open(inode, filp);
5401 if (ret < 0)
5402 return ret;
5403
5404 info = filp->private_data;
5405
5406 if (info->iter.trace->use_max_tr) {
5407 tracing_buffers_release(inode, filp);
5408 return -EBUSY;
5409 }
5410
5411 info->iter.snapshot = true;
5412 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5413
5414 return ret;
5415}
5416
debdd57f
HT
5417#endif /* CONFIG_TRACER_SNAPSHOT */
5418
5419
6508fa76
SF
5420static const struct file_operations tracing_thresh_fops = {
5421 .open = tracing_open_generic,
5422 .read = tracing_thresh_read,
5423 .write = tracing_thresh_write,
5424 .llseek = generic_file_llseek,
5425};
5426
5e2336a0 5427static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
5428 .open = tracing_open_generic,
5429 .read = tracing_max_lat_read,
5430 .write = tracing_max_lat_write,
b444786f 5431 .llseek = generic_file_llseek,
bc0c38d1
SR
5432};
5433
5e2336a0 5434static const struct file_operations set_tracer_fops = {
4bf39a94
IM
5435 .open = tracing_open_generic,
5436 .read = tracing_set_trace_read,
5437 .write = tracing_set_trace_write,
b444786f 5438 .llseek = generic_file_llseek,
bc0c38d1
SR
5439};
5440
5e2336a0 5441static const struct file_operations tracing_pipe_fops = {
4bf39a94 5442 .open = tracing_open_pipe,
2a2cc8f7 5443 .poll = tracing_poll_pipe,
4bf39a94 5444 .read = tracing_read_pipe,
3c56819b 5445 .splice_read = tracing_splice_read_pipe,
4bf39a94 5446 .release = tracing_release_pipe,
b444786f 5447 .llseek = no_llseek,
b3806b43
SR
5448};
5449
5e2336a0 5450static const struct file_operations tracing_entries_fops = {
0bc392ee 5451 .open = tracing_open_generic_tr,
a98a3c3f
SR
5452 .read = tracing_entries_read,
5453 .write = tracing_entries_write,
b444786f 5454 .llseek = generic_file_llseek,
0bc392ee 5455 .release = tracing_release_generic_tr,
a98a3c3f
SR
5456};
5457
f81ab074 5458static const struct file_operations tracing_total_entries_fops = {
7b85af63 5459 .open = tracing_open_generic_tr,
f81ab074
VN
5460 .read = tracing_total_entries_read,
5461 .llseek = generic_file_llseek,
7b85af63 5462 .release = tracing_release_generic_tr,
f81ab074
VN
5463};
5464
4f271a2a 5465static const struct file_operations tracing_free_buffer_fops = {
7b85af63 5466 .open = tracing_open_generic_tr,
4f271a2a
VN
5467 .write = tracing_free_buffer_write,
5468 .release = tracing_free_buffer_release,
5469};
5470
5e2336a0 5471static const struct file_operations tracing_mark_fops = {
7b85af63 5472 .open = tracing_open_generic_tr,
5bf9a1ee 5473 .write = tracing_mark_write,
b444786f 5474 .llseek = generic_file_llseek,
7b85af63 5475 .release = tracing_release_generic_tr,
5bf9a1ee
PP
5476};
5477
5079f326 5478static const struct file_operations trace_clock_fops = {
13f16d20
LZ
5479 .open = tracing_clock_open,
5480 .read = seq_read,
5481 .llseek = seq_lseek,
7b85af63 5482 .release = tracing_single_release_tr,
5079f326
Z
5483 .write = tracing_clock_write,
5484};
5485
debdd57f
HT
5486#ifdef CONFIG_TRACER_SNAPSHOT
5487static const struct file_operations snapshot_fops = {
5488 .open = tracing_snapshot_open,
5489 .read = seq_read,
5490 .write = tracing_snapshot_write,
098c879e 5491 .llseek = tracing_lseek,
2b6080f2 5492 .release = tracing_snapshot_release,
debdd57f 5493};
debdd57f 5494
6de58e62
SRRH
5495static const struct file_operations snapshot_raw_fops = {
5496 .open = snapshot_raw_open,
5497 .read = tracing_buffers_read,
5498 .release = tracing_buffers_release,
5499 .splice_read = tracing_buffers_splice_read,
5500 .llseek = no_llseek,
2cadf913
SR
5501};
5502
6de58e62
SRRH
5503#endif /* CONFIG_TRACER_SNAPSHOT */
5504
2cadf913
SR
5505static int tracing_buffers_open(struct inode *inode, struct file *filp)
5506{
46ef2be0 5507 struct trace_array *tr = inode->i_private;
2cadf913 5508 struct ftrace_buffer_info *info;
7b85af63 5509 int ret;
2cadf913
SR
5510
5511 if (tracing_disabled)
5512 return -ENODEV;
5513
7b85af63
SRRH
5514 if (trace_array_get(tr) < 0)
5515 return -ENODEV;
5516
2cadf913 5517 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
5518 if (!info) {
5519 trace_array_put(tr);
2cadf913 5520 return -ENOMEM;
7b85af63 5521 }
2cadf913 5522
a695cb58
SRRH
5523 mutex_lock(&trace_types_lock);
5524
cc60cdc9 5525 info->iter.tr = tr;
46ef2be0 5526 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 5527 info->iter.trace = tr->current_trace;
12883efb 5528 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 5529 info->spare = NULL;
2cadf913 5530 /* Force reading ring buffer for first read */
cc60cdc9 5531 info->read = (unsigned int)-1;
2cadf913
SR
5532
5533 filp->private_data = info;
5534
cf6ab6d9
SRRH
5535 tr->current_trace->ref++;
5536
a695cb58
SRRH
5537 mutex_unlock(&trace_types_lock);
5538
7b85af63
SRRH
5539 ret = nonseekable_open(inode, filp);
5540 if (ret < 0)
5541 trace_array_put(tr);
5542
5543 return ret;
2cadf913
SR
5544}
5545
cc60cdc9
SR
5546static unsigned int
5547tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5548{
5549 struct ftrace_buffer_info *info = filp->private_data;
5550 struct trace_iterator *iter = &info->iter;
5551
5552 return trace_poll(iter, filp, poll_table);
5553}
5554
2cadf913
SR
5555static ssize_t
5556tracing_buffers_read(struct file *filp, char __user *ubuf,
5557 size_t count, loff_t *ppos)
5558{
5559 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 5560 struct trace_iterator *iter = &info->iter;
2cadf913 5561 ssize_t ret;
6de58e62 5562 ssize_t size;
2cadf913 5563
2dc5d12b
SR
5564 if (!count)
5565 return 0;
5566
6de58e62 5567#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
5568 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5569 return -EBUSY;
6de58e62
SRRH
5570#endif
5571
ddd538f3 5572 if (!info->spare)
12883efb
SRRH
5573 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5574 iter->cpu_file);
ddd538f3 5575 if (!info->spare)
d716ff71 5576 return -ENOMEM;
ddd538f3 5577
2cadf913
SR
5578 /* Do we have previous read data to read? */
5579 if (info->read < PAGE_SIZE)
5580 goto read;
5581
b627344f 5582 again:
cc60cdc9 5583 trace_access_lock(iter->cpu_file);
12883efb 5584 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
5585 &info->spare,
5586 count,
cc60cdc9
SR
5587 iter->cpu_file, 0);
5588 trace_access_unlock(iter->cpu_file);
2cadf913 5589
b627344f
SR
5590 if (ret < 0) {
5591 if (trace_empty(iter)) {
d716ff71
SRRH
5592 if ((filp->f_flags & O_NONBLOCK))
5593 return -EAGAIN;
5594
e30f53aa 5595 ret = wait_on_pipe(iter, false);
d716ff71
SRRH
5596 if (ret)
5597 return ret;
5598
b627344f
SR
5599 goto again;
5600 }
d716ff71 5601 return 0;
b627344f 5602 }
436fc280 5603
436fc280 5604 info->read = 0;
b627344f 5605 read:
2cadf913
SR
5606 size = PAGE_SIZE - info->read;
5607 if (size > count)
5608 size = count;
5609
5610 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
5611 if (ret == size)
5612 return -EFAULT;
5613
2dc5d12b
SR
5614 size -= ret;
5615
2cadf913
SR
5616 *ppos += size;
5617 info->read += size;
5618
5619 return size;
5620}
5621
5622static int tracing_buffers_release(struct inode *inode, struct file *file)
5623{
5624 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5625 struct trace_iterator *iter = &info->iter;
2cadf913 5626
a695cb58
SRRH
5627 mutex_lock(&trace_types_lock);
5628
cf6ab6d9
SRRH
5629 iter->tr->current_trace->ref--;
5630
ff451961 5631 __trace_array_put(iter->tr);
2cadf913 5632
ddd538f3 5633 if (info->spare)
12883efb 5634 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
2cadf913
SR
5635 kfree(info);
5636
a695cb58
SRRH
5637 mutex_unlock(&trace_types_lock);
5638
2cadf913
SR
5639 return 0;
5640}
5641
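/*
 * A buffer_ref pins one ring-buffer page that has been handed to a
 * pipe via splice(); the page is returned to the ring buffer once the
 * last reference is dropped.
 */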
5642struct buffer_ref {
5643 struct ring_buffer *buffer;
5644 void *page;
5645 int ref;
5646};
5647
5648static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5649 struct pipe_buffer *buf)
5650{
5651 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5652
5653 if (--ref->ref)
5654 return;
5655
5656 ring_buffer_free_read_page(ref->buffer, ref->page);
5657 kfree(ref);
5658 buf->private = 0;
5659}
5660
2cadf913
SR
5661static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5662 struct pipe_buffer *buf)
5663{
5664 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5665
5666 ref->ref++;
5667}
5668
5669/* Pipe buffer operations for a buffer. */
28dfef8f 5670static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 5671 .can_merge = 0,
2cadf913
SR
5672 .confirm = generic_pipe_buf_confirm,
5673 .release = buffer_pipe_buf_release,
d55cb6cf 5674 .steal = generic_pipe_buf_steal,
2cadf913
SR
5675 .get = buffer_pipe_buf_get,
5676};
5677
5678/*
5679 * Callback from splice_to_pipe(), if we need to release some pages
5681 * at the end of the spd in case we errored out in filling the pipe.
5681 */
5682static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5683{
5684 struct buffer_ref *ref =
5685 (struct buffer_ref *)spd->partial[i].private;
5686
5687 if (--ref->ref)
5688 return;
5689
5690 ring_buffer_free_read_page(ref->buffer, ref->page);
5691 kfree(ref);
5692 spd->partial[i].private = 0;
5693}
5694
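/*
 * Splice whole ring-buffer pages into a pipe without copying: each
 * page is wrapped in a buffer_ref and handed to splice_to_pipe().
 * If nothing was read, either return -EAGAIN (non-blocking) or wait
 * for data and try again.
 */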
5695static ssize_t
5696tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5697 struct pipe_inode_info *pipe, size_t len,
5698 unsigned int flags)
5699{
5700 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5701 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
5702 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5703 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 5704 struct splice_pipe_desc spd = {
35f3d14d
JA
5705 .pages = pages_def,
5706 .partial = partial_def,
047fe360 5707 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
5708 .flags = flags,
5709 .ops = &buffer_pipe_buf_ops,
5710 .spd_release = buffer_spd_release,
5711 };
5712 struct buffer_ref *ref;
93459c6c 5713 int entries, size, i;
07906da7 5714 ssize_t ret = 0;
2cadf913 5715
6de58e62 5716#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
5717 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5718 return -EBUSY;
6de58e62
SRRH
5719#endif
5720
d716ff71
SRRH
5721 if (splice_grow_spd(pipe, &spd))
5722 return -ENOMEM;
35f3d14d 5723
d716ff71
SRRH
5724 if (*ppos & (PAGE_SIZE - 1))
5725 return -EINVAL;
93cfb3c9
LJ
5726
5727 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
5728 if (len < PAGE_SIZE)
5729 return -EINVAL;
93cfb3c9
LJ
5730 len &= PAGE_MASK;
5731 }
5732
cc60cdc9
SR
5733 again:
5734 trace_access_lock(iter->cpu_file);
12883efb 5735 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 5736
a786c06d 5737 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
5738 struct page *page;
5739 int r;
5740
5741 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
5742 if (!ref) {
5743 ret = -ENOMEM;
2cadf913 5744 break;
07906da7 5745 }
2cadf913 5746
7267fa68 5747 ref->ref = 1;
12883efb 5748 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 5749 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2cadf913 5750 if (!ref->page) {
07906da7 5751 ret = -ENOMEM;
2cadf913
SR
5752 kfree(ref);
5753 break;
5754 }
5755
5756 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 5757 len, iter->cpu_file, 1);
2cadf913 5758 if (r < 0) {
7ea59064 5759 ring_buffer_free_read_page(ref->buffer, ref->page);
2cadf913
SR
5760 kfree(ref);
5761 break;
5762 }
5763
5764 /*
5765 * Zero out any leftover data; this page is going to
5766 * user land.
5767 */
5768 size = ring_buffer_page_len(ref->page);
5769 if (size < PAGE_SIZE)
5770 memset(ref->page + size, 0, PAGE_SIZE - size);
5771
5772 page = virt_to_page(ref->page);
5773
5774 spd.pages[i] = page;
5775 spd.partial[i].len = PAGE_SIZE;
5776 spd.partial[i].offset = 0;
5777 spd.partial[i].private = (unsigned long)ref;
5778 spd.nr_pages++;
93cfb3c9 5779 *ppos += PAGE_SIZE;
93459c6c 5780
12883efb 5781 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
5782 }
5783
cc60cdc9 5784 trace_access_unlock(iter->cpu_file);
2cadf913
SR
5785 spd.nr_pages = i;
5786
5787 /* did we read anything? */
5788 if (!spd.nr_pages) {
07906da7 5789 if (ret)
d716ff71
SRRH
5790 return ret;
5791
5792 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5793 return -EAGAIN;
07906da7 5794
e30f53aa 5795 ret = wait_on_pipe(iter, true);
8b8b3683 5796 if (ret)
d716ff71 5797 return ret;
e30f53aa 5798
cc60cdc9 5799 goto again;
2cadf913
SR
5800 }
5801
5802 ret = splice_to_pipe(pipe, &spd);
047fe360 5803 splice_shrink_spd(&spd);
6de58e62 5804
2cadf913
SR
5805 return ret;
5806}
5807
5808static const struct file_operations tracing_buffers_fops = {
5809 .open = tracing_buffers_open,
5810 .read = tracing_buffers_read,
cc60cdc9 5811 .poll = tracing_buffers_poll,
2cadf913
SR
5812 .release = tracing_buffers_release,
5813 .splice_read = tracing_buffers_splice_read,
5814 .llseek = no_llseek,
5815};
5816
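/*
 * Per-cpu "stats" file: report ring buffer statistics for one CPU
 * (entries, overruns, bytes, oldest/now timestamps, dropped and read
 * events).
 */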
c8d77183
SR
5817static ssize_t
5818tracing_stats_read(struct file *filp, char __user *ubuf,
5819 size_t count, loff_t *ppos)
5820{
4d3435b8
ON
5821 struct inode *inode = file_inode(filp);
5822 struct trace_array *tr = inode->i_private;
12883efb 5823 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 5824 int cpu = tracing_get_cpu(inode);
c8d77183
SR
5825 struct trace_seq *s;
5826 unsigned long cnt;
c64e148a
VN
5827 unsigned long long t;
5828 unsigned long usec_rem;
c8d77183 5829
e4f2d10f 5830 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 5831 if (!s)
a646365c 5832 return -ENOMEM;
c8d77183
SR
5833
5834 trace_seq_init(s);
5835
12883efb 5836 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5837 trace_seq_printf(s, "entries: %ld\n", cnt);
5838
12883efb 5839 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5840 trace_seq_printf(s, "overrun: %ld\n", cnt);
5841
12883efb 5842 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5843 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5844
12883efb 5845 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
5846 trace_seq_printf(s, "bytes: %ld\n", cnt);
5847
58e8eedf 5848 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 5849 /* local or global for trace_clock */
12883efb 5850 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
5851 usec_rem = do_div(t, USEC_PER_SEC);
5852 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5853 t, usec_rem);
5854
12883efb 5855 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
5856 usec_rem = do_div(t, USEC_PER_SEC);
5857 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5858 } else {
5859 /* counter or tsc mode for trace_clock */
5860 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 5861 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 5862
11043d8b 5863 trace_seq_printf(s, "now ts: %llu\n",
12883efb 5864 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 5865 }
c64e148a 5866
12883efb 5867 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
5868 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5869
12883efb 5870 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
5871 trace_seq_printf(s, "read events: %ld\n", cnt);
5872
5ac48378
SRRH
5873 count = simple_read_from_buffer(ubuf, count, ppos,
5874 s->buffer, trace_seq_used(s));
c8d77183
SR
5875
5876 kfree(s);
5877
5878 return count;
5879}
5880
5881static const struct file_operations tracing_stats_fops = {
4d3435b8 5882 .open = tracing_open_generic_tr,
c8d77183 5883 .read = tracing_stats_read,
b444786f 5884 .llseek = generic_file_llseek,
4d3435b8 5885 .release = tracing_release_generic_tr,
c8d77183
SR
5886};
5887
bc0c38d1
SR
5888#ifdef CONFIG_DYNAMIC_FTRACE
5889
b807c3d0
SR
5890int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5891{
5892 return 0;
5893}
5894
bc0c38d1 5895static ssize_t
b807c3d0 5896tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
5897 size_t cnt, loff_t *ppos)
5898{
a26a2a27
SR
5899 static char ftrace_dyn_info_buffer[1024];
5900 static DEFINE_MUTEX(dyn_info_mutex);
bc0c38d1 5901 unsigned long *p = filp->private_data;
b807c3d0 5902 char *buf = ftrace_dyn_info_buffer;
a26a2a27 5903 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
bc0c38d1
SR
5904 int r;
5905
b807c3d0
SR
5906 mutex_lock(&dyn_info_mutex);
5907 r = sprintf(buf, "%ld ", *p);
4bf39a94 5908
a26a2a27 5909 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
b807c3d0
SR
5910 buf[r++] = '\n';
5911
5912 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5913
5914 mutex_unlock(&dyn_info_mutex);
5915
5916 return r;
bc0c38d1
SR
5917}
5918
5e2336a0 5919static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 5920 .open = tracing_open_generic,
b807c3d0 5921 .read = tracing_read_dyn_info,
b444786f 5922 .llseek = generic_file_llseek,
bc0c38d1 5923};
77fd5c15 5924#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 5925
77fd5c15
SRRH
5926#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5927static void
5928ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5929{
5930 tracing_snapshot();
5931}
bc0c38d1 5932
77fd5c15
SRRH
5933static void
5934ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
bc0c38d1 5935{
77fd5c15
SRRH
5936 unsigned long *count = (long *)data;
5937
5938 if (!*count)
5939 return;
bc0c38d1 5940
77fd5c15
SRRH
5941 if (*count != -1)
5942 (*count)--;
5943
5944 tracing_snapshot();
5945}
5946
5947static int
5948ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5949 struct ftrace_probe_ops *ops, void *data)
5950{
5951 long count = (long)data;
5952
5953 seq_printf(m, "%ps:", (void *)ip);
5954
fa6f0cc7 5955 seq_puts(m, "snapshot");
77fd5c15
SRRH
5956
5957 if (count == -1)
fa6f0cc7 5958 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
5959 else
5960 seq_printf(m, ":count=%ld\n", count);
5961
5962 return 0;
5963}
5964
5965static struct ftrace_probe_ops snapshot_probe_ops = {
5966 .func = ftrace_snapshot,
5967 .print = ftrace_snapshot_print,
5968};
5969
5970static struct ftrace_probe_ops snapshot_count_probe_ops = {
5971 .func = ftrace_count_snapshot,
5972 .print = ftrace_snapshot_print,
5973};
5974
5975static int
5976ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5977 char *glob, char *cmd, char *param, int enable)
5978{
5979 struct ftrace_probe_ops *ops;
5980 void *count = (void *)-1;
5981 char *number;
5982 int ret;
5983
5984 /* hash funcs only work with set_ftrace_filter */
5985 if (!enable)
5986 return -EINVAL;
5987
5988 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5989
5990 if (glob[0] == '!') {
5991 unregister_ftrace_function_probe_func(glob+1, ops);
5992 return 0;
5993 }
5994
5995 if (!param)
5996 goto out_reg;
5997
5998 number = strsep(&param, ":");
5999
6000 if (!strlen(number))
6001 goto out_reg;
6002
6003 /*
6004 * We use the callback data field (which is a pointer)
6005 * as our counter.
6006 */
6007 ret = kstrtoul(number, 0, (unsigned long *)&count);
6008 if (ret)
6009 return ret;
6010
6011 out_reg:
6012 ret = register_ftrace_function_probe(glob, ops, count);
6013
6014 if (ret >= 0)
6015 alloc_snapshot(&global_trace);
6016
6017 return ret < 0 ? ret : 0;
6018}
6019
6020static struct ftrace_func_command ftrace_snapshot_cmd = {
6021 .name = "snapshot",
6022 .func = ftrace_trace_snapshot_callback,
6023};
6024
38de93ab 6025static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
6026{
6027 return register_ftrace_command(&ftrace_snapshot_cmd);
6028}
6029#else
38de93ab 6030static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 6031#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 6032
7eeafbca 6033static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 6034{
8434dc93
SRRH
6035 if (WARN_ON(!tr->dir))
6036 return ERR_PTR(-ENODEV);
6037
6038 /* Top directory uses NULL as the parent */
6039 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6040 return NULL;
6041
6042 /* All sub buffers have a descriptor */
2b6080f2 6043 return tr->dir;
bc0c38d1
SR
6044}
6045
2b6080f2 6046static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 6047{
b04cc6b1
FW
6048 struct dentry *d_tracer;
6049
2b6080f2
SR
6050 if (tr->percpu_dir)
6051 return tr->percpu_dir;
b04cc6b1 6052
7eeafbca 6053 d_tracer = tracing_get_dentry(tr);
14a5ae40 6054 if (IS_ERR(d_tracer))
b04cc6b1
FW
6055 return NULL;
6056
8434dc93 6057 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 6058
2b6080f2 6059 WARN_ONCE(!tr->percpu_dir,
8434dc93 6060 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 6061
2b6080f2 6062 return tr->percpu_dir;
b04cc6b1
FW
6063}
6064
649e9c70
ON
6065static struct dentry *
6066trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6067 void *data, long cpu, const struct file_operations *fops)
6068{
6069 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6070
6071 if (ret) /* See tracing_get_cpu() */
7682c918 6072 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
6073 return ret;
6074}
6075
2b6080f2 6076static void
8434dc93 6077tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 6078{
2b6080f2 6079 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 6080 struct dentry *d_cpu;
dd49a38c 6081 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 6082
0a3d7ce7
NK
6083 if (!d_percpu)
6084 return;
6085
dd49a38c 6086 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 6087 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 6088 if (!d_cpu) {
8434dc93 6089 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
6090 return;
6091 }
b04cc6b1 6092
8656e7a2 6093 /* per cpu trace_pipe */
649e9c70 6094 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 6095 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
6096
6097 /* per cpu trace */
649e9c70 6098 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 6099 tr, cpu, &tracing_fops);
7f96f93f 6100
649e9c70 6101 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 6102 tr, cpu, &tracing_buffers_fops);
7f96f93f 6103
649e9c70 6104 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 6105 tr, cpu, &tracing_stats_fops);
438ced17 6106
649e9c70 6107 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 6108 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
6109
6110#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 6111 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 6112 tr, cpu, &snapshot_fops);
6de58e62 6113
649e9c70 6114 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 6115 tr, cpu, &snapshot_raw_fops);
f1affcaa 6116#endif
b04cc6b1
FW
6117}
6118
60a11774
SR
6119#ifdef CONFIG_FTRACE_SELFTEST
6120/* Let selftest have access to static functions in this file */
6121#include "trace_selftest.c"
6122#endif
6123
577b785f
SR
6124static ssize_t
6125trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6126 loff_t *ppos)
6127{
6128 struct trace_option_dentry *topt = filp->private_data;
6129 char *buf;
6130
6131 if (topt->flags->val & topt->opt->bit)
6132 buf = "1\n";
6133 else
6134 buf = "0\n";
6135
6136 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6137}
6138
6139static ssize_t
6140trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6141 loff_t *ppos)
6142{
6143 struct trace_option_dentry *topt = filp->private_data;
6144 unsigned long val;
577b785f
SR
6145 int ret;
6146
22fe9b54
PH
6147 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6148 if (ret)
577b785f
SR
6149 return ret;
6150
8d18eaaf
LZ
6151 if (val != 0 && val != 1)
6152 return -EINVAL;
577b785f 6153
8d18eaaf 6154 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 6155 mutex_lock(&trace_types_lock);
8c1a49ae 6156 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 6157 topt->opt, !val);
577b785f
SR
6158 mutex_unlock(&trace_types_lock);
6159 if (ret)
6160 return ret;
577b785f
SR
6161 }
6162
6163 *ppos += cnt;
6164
6165 return cnt;
6166}
6167
6168
6169static const struct file_operations trace_options_fops = {
6170 .open = tracing_open_generic,
6171 .read = trace_options_read,
6172 .write = trace_options_write,
b444786f 6173 .llseek = generic_file_llseek,
577b785f
SR
6174};
6175
a8259075
SR
6176static ssize_t
6177trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6178 loff_t *ppos)
6179{
6180 long index = (long)filp->private_data;
6181 char *buf;
6182
6183 if (trace_flags & (1 << index))
6184 buf = "1\n";
6185 else
6186 buf = "0\n";
6187
6188 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6189}
6190
6191static ssize_t
6192trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6193 loff_t *ppos)
6194{
2b6080f2 6195 struct trace_array *tr = &global_trace;
a8259075 6196 long index = (long)filp->private_data;
a8259075
SR
6197 unsigned long val;
6198 int ret;
6199
22fe9b54
PH
6200 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6201 if (ret)
a8259075
SR
6202 return ret;
6203
f2d84b65 6204 if (val != 0 && val != 1)
a8259075 6205 return -EINVAL;
69d34da2
SRRH
6206
6207 mutex_lock(&trace_types_lock);
2b6080f2 6208 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 6209 mutex_unlock(&trace_types_lock);
a8259075 6210
613f04a0
SRRH
6211 if (ret < 0)
6212 return ret;
6213
a8259075
SR
6214 *ppos += cnt;
6215
6216 return cnt;
6217}
6218
a8259075
SR
6219static const struct file_operations trace_options_core_fops = {
6220 .open = tracing_open_generic,
6221 .read = trace_options_core_read,
6222 .write = trace_options_core_write,
b444786f 6223 .llseek = generic_file_llseek,
a8259075
SR
6224};
6225
5452af66 6226struct dentry *trace_create_file(const char *name,
f4ae40a6 6227 umode_t mode,
5452af66
FW
6228 struct dentry *parent,
6229 void *data,
6230 const struct file_operations *fops)
6231{
6232 struct dentry *ret;
6233
8434dc93 6234 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 6235 if (!ret)
8434dc93 6236 pr_warning("Could not create tracefs '%s' entry\n", name);
5452af66
FW
6237
6238 return ret;
6239}
6240
6241
2b6080f2 6242static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
6243{
6244 struct dentry *d_tracer;
a8259075 6245
2b6080f2
SR
6246 if (tr->options)
6247 return tr->options;
a8259075 6248
7eeafbca 6249 d_tracer = tracing_get_dentry(tr);
14a5ae40 6250 if (IS_ERR(d_tracer))
a8259075
SR
6251 return NULL;
6252
8434dc93 6253 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 6254 if (!tr->options) {
8434dc93 6255 pr_warning("Could not create tracefs directory 'options'\n");
a8259075
SR
6256 return NULL;
6257 }
6258
2b6080f2 6259 return tr->options;
a8259075
SR
6260}
6261
577b785f 6262static void
2b6080f2
SR
6263create_trace_option_file(struct trace_array *tr,
6264 struct trace_option_dentry *topt,
577b785f
SR
6265 struct tracer_flags *flags,
6266 struct tracer_opt *opt)
6267{
6268 struct dentry *t_options;
577b785f 6269
2b6080f2 6270 t_options = trace_options_init_dentry(tr);
577b785f
SR
6271 if (!t_options)
6272 return;
6273
6274 topt->flags = flags;
6275 topt->opt = opt;
2b6080f2 6276 topt->tr = tr;
577b785f 6277
5452af66 6278 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
6279 &trace_options_fops);
6280
577b785f
SR
6281}
6282
6283static struct trace_option_dentry *
2b6080f2 6284create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
6285{
6286 struct trace_option_dentry *topts;
6287 struct tracer_flags *flags;
6288 struct tracer_opt *opts;
6289 int cnt;
6290
6291 if (!tracer)
6292 return NULL;
6293
6294 flags = tracer->flags;
6295
6296 if (!flags || !flags->opts)
6297 return NULL;
6298
6299 opts = flags->opts;
6300
6301 for (cnt = 0; opts[cnt].name; cnt++)
6302 ;
6303
0cfe8245 6304 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f
SR
6305 if (!topts)
6306 return NULL;
6307
41d9c0be 6308 for (cnt = 0; opts[cnt].name; cnt++) {
2b6080f2 6309 create_trace_option_file(tr, &topts[cnt], flags,
577b785f 6310 &opts[cnt]);
41d9c0be
SRRH
6311 WARN_ONCE(topts[cnt].entry == NULL,
6312 "Failed to create trace option: %s",
6313 opts[cnt].name);
6314 }
577b785f
SR
6315
6316 return topts;
6317}
6318
a8259075 6319static struct dentry *
2b6080f2
SR
6320create_trace_option_core_file(struct trace_array *tr,
6321 const char *option, long index)
a8259075
SR
6322{
6323 struct dentry *t_options;
a8259075 6324
2b6080f2 6325 t_options = trace_options_init_dentry(tr);
a8259075
SR
6326 if (!t_options)
6327 return NULL;
6328
5452af66 6329 return trace_create_file(option, 0644, t_options, (void *)index,
a8259075 6330 &trace_options_core_fops);
a8259075
SR
6331}
6332
2b6080f2 6333static __init void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
6334{
6335 struct dentry *t_options;
a8259075
SR
6336 int i;
6337
2b6080f2 6338 t_options = trace_options_init_dentry(tr);
a8259075
SR
6339 if (!t_options)
6340 return;
6341
5452af66 6342 for (i = 0; trace_options[i]; i++)
2b6080f2 6343 create_trace_option_core_file(tr, trace_options[i], i);
a8259075
SR
6344}
6345
499e5470
SR
6346static ssize_t
6347rb_simple_read(struct file *filp, char __user *ubuf,
6348 size_t cnt, loff_t *ppos)
6349{
348f0fc2 6350 struct trace_array *tr = filp->private_data;
499e5470
SR
6351 char buf[64];
6352 int r;
6353
10246fa3 6354 r = tracer_tracing_is_on(tr);
499e5470
SR
6355 r = sprintf(buf, "%d\n", r);
6356
6357 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6358}
6359
6360static ssize_t
6361rb_simple_write(struct file *filp, const char __user *ubuf,
6362 size_t cnt, loff_t *ppos)
6363{
348f0fc2 6364 struct trace_array *tr = filp->private_data;
12883efb 6365 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
6366 unsigned long val;
6367 int ret;
6368
6369 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6370 if (ret)
6371 return ret;
6372
6373 if (buffer) {
2df8f8a6
SR
6374 mutex_lock(&trace_types_lock);
6375 if (val) {
10246fa3 6376 tracer_tracing_on(tr);
2b6080f2
SR
6377 if (tr->current_trace->start)
6378 tr->current_trace->start(tr);
2df8f8a6 6379 } else {
10246fa3 6380 tracer_tracing_off(tr);
2b6080f2
SR
6381 if (tr->current_trace->stop)
6382 tr->current_trace->stop(tr);
2df8f8a6
SR
6383 }
6384 mutex_unlock(&trace_types_lock);
499e5470
SR
6385 }
6386
6387 (*ppos)++;
6388
6389 return cnt;
6390}
6391
6392static const struct file_operations rb_simple_fops = {
7b85af63 6393 .open = tracing_open_generic_tr,
499e5470
SR
6394 .read = rb_simple_read,
6395 .write = rb_simple_write,
7b85af63 6396 .release = tracing_release_generic_tr,
499e5470
SR
6397 .llseek = default_llseek,
6398};
6399
277ba044
SR
6400struct dentry *trace_instance_dir;
6401
6402static void
8434dc93 6403init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 6404
55034cd6
SRRH
6405static int
6406allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
6407{
6408 enum ring_buffer_flags rb_flags;
737223fb
SRRH
6409
6410 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6411
dced341b
SRRH
6412 buf->tr = tr;
6413
55034cd6
SRRH
6414 buf->buffer = ring_buffer_alloc(size, rb_flags);
6415 if (!buf->buffer)
6416 return -ENOMEM;
737223fb 6417
55034cd6
SRRH
6418 buf->data = alloc_percpu(struct trace_array_cpu);
6419 if (!buf->data) {
6420 ring_buffer_free(buf->buffer);
6421 return -ENOMEM;
6422 }
737223fb 6423
737223fb
SRRH
6424 /* Allocate the first page for all buffers */
6425 set_buffer_entries(&tr->trace_buffer,
6426 ring_buffer_size(tr->trace_buffer.buffer, 0));
6427
55034cd6
SRRH
6428 return 0;
6429}
737223fb 6430
55034cd6
SRRH
6431static int allocate_trace_buffers(struct trace_array *tr, int size)
6432{
6433 int ret;
737223fb 6434
55034cd6
SRRH
6435 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6436 if (ret)
6437 return ret;
737223fb 6438
55034cd6
SRRH
6439#ifdef CONFIG_TRACER_MAX_TRACE
6440 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6441 allocate_snapshot ? size : 1);
6442 if (WARN_ON(ret)) {
737223fb 6443 ring_buffer_free(tr->trace_buffer.buffer);
55034cd6
SRRH
6444 free_percpu(tr->trace_buffer.data);
6445 return -ENOMEM;
6446 }
6447 tr->allocated_snapshot = allocate_snapshot;
737223fb 6448
55034cd6
SRRH
6449 /*
6450 * Only the top level trace array gets its snapshot allocated
6451 * from the kernel command line.
6452 */
6453 allocate_snapshot = false;
737223fb 6454#endif
55034cd6 6455 return 0;
737223fb
SRRH
6456}
6457
f0b70cc4
SRRH
6458static void free_trace_buffer(struct trace_buffer *buf)
6459{
6460 if (buf->buffer) {
6461 ring_buffer_free(buf->buffer);
6462 buf->buffer = NULL;
6463 free_percpu(buf->data);
6464 buf->data = NULL;
6465 }
6466}
6467
23aaa3c1
SRRH
6468static void free_trace_buffers(struct trace_array *tr)
6469{
6470 if (!tr)
6471 return;
6472
f0b70cc4 6473 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
6474
6475#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 6476 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
6477#endif
6478}
6479
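/*
 * Create a new trace instance: a trace_array with its own ring
 * buffers and its own tracefs directory of control files under
 * instances/<name>.
 */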
eae47358 6480static int instance_mkdir(const char *name)
737223fb 6481{
277ba044
SR
6482 struct trace_array *tr;
6483 int ret;
277ba044
SR
6484
6485 mutex_lock(&trace_types_lock);
6486
6487 ret = -EEXIST;
6488 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6489 if (tr->name && strcmp(tr->name, name) == 0)
6490 goto out_unlock;
6491 }
6492
6493 ret = -ENOMEM;
6494 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6495 if (!tr)
6496 goto out_unlock;
6497
6498 tr->name = kstrdup(name, GFP_KERNEL);
6499 if (!tr->name)
6500 goto out_free_tr;
6501
ccfe9e42
AL
6502 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6503 goto out_free_tr;
6504
6505 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6506
277ba044
SR
6507 raw_spin_lock_init(&tr->start_lock);
6508
0b9b12c1
SRRH
6509 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6510
277ba044
SR
6511 tr->current_trace = &nop_trace;
6512
6513 INIT_LIST_HEAD(&tr->systems);
6514 INIT_LIST_HEAD(&tr->events);
6515
737223fb 6516 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
6517 goto out_free_tr;
6518
8434dc93 6519 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
6520 if (!tr->dir)
6521 goto out_free_tr;
6522
6523 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 6524 if (ret) {
8434dc93 6525 tracefs_remove_recursive(tr->dir);
277ba044 6526 goto out_free_tr;
609e85a7 6527 }
277ba044 6528
8434dc93 6529 init_tracer_tracefs(tr, tr->dir);
277ba044
SR
6530
6531 list_add(&tr->list, &ftrace_trace_arrays);
6532
6533 mutex_unlock(&trace_types_lock);
6534
6535 return 0;
6536
6537 out_free_tr:
23aaa3c1 6538 free_trace_buffers(tr);
ccfe9e42 6539 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
6540 kfree(tr->name);
6541 kfree(tr);
6542
6543 out_unlock:
6544 mutex_unlock(&trace_types_lock);
6545
6546 return ret;
6547
6548}
6549
eae47358 6550static int instance_rmdir(const char *name)
0c8916c3
SR
6551{
6552 struct trace_array *tr;
6553 int found = 0;
6554 int ret;
6555
6556 mutex_lock(&trace_types_lock);
6557
6558 ret = -ENODEV;
6559 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6560 if (tr->name && strcmp(tr->name, name) == 0) {
6561 found = 1;
6562 break;
6563 }
6564 }
6565 if (!found)
6566 goto out_unlock;
6567
a695cb58 6568 ret = -EBUSY;
cf6ab6d9 6569 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
6570 goto out_unlock;
6571
0c8916c3
SR
6572 list_del(&tr->list);
6573
6b450d25 6574 tracing_set_nop(tr);
0c8916c3 6575 event_trace_del_tracer(tr);
591dffda 6576 ftrace_destroy_function_files(tr);
0c8916c3 6577 debugfs_remove_recursive(tr->dir);
a9fcaaac 6578 free_trace_buffers(tr);
0c8916c3
SR
6579
6580 kfree(tr->name);
6581 kfree(tr);
6582
6583 ret = 0;
6584
6585 out_unlock:
6586 mutex_unlock(&trace_types_lock);
6587
6588 return ret;
6589}
6590
277ba044
SR
6591static __init void create_trace_instances(struct dentry *d_tracer)
6592{
eae47358
SRRH
6593 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6594 instance_mkdir,
6595 instance_rmdir);
277ba044
SR
6596 if (WARN_ON(!trace_instance_dir))
6597 return;
277ba044
SR
6598}
6599
2b6080f2 6600static void
8434dc93 6601init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 6602{
121aaee7 6603 int cpu;
2b6080f2 6604
607e2ea1
SRRH
6605 trace_create_file("available_tracers", 0444, d_tracer,
6606 tr, &show_traces_fops);
6607
6608 trace_create_file("current_tracer", 0644, d_tracer,
6609 tr, &set_tracer_fops);
6610
ccfe9e42
AL
6611 trace_create_file("tracing_cpumask", 0644, d_tracer,
6612 tr, &tracing_cpumask_fops);
6613
2b6080f2
SR
6614 trace_create_file("trace_options", 0644, d_tracer,
6615 tr, &tracing_iter_fops);
6616
6617 trace_create_file("trace", 0644, d_tracer,
6484c71c 6618 tr, &tracing_fops);
2b6080f2
SR
6619
6620 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 6621 tr, &tracing_pipe_fops);
2b6080f2
SR
6622
6623 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 6624 tr, &tracing_entries_fops);
2b6080f2
SR
6625
6626 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6627 tr, &tracing_total_entries_fops);
6628
238ae93d 6629 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
6630 tr, &tracing_free_buffer_fops);
6631
6632 trace_create_file("trace_marker", 0220, d_tracer,
6633 tr, &tracing_mark_fops);
6634
6635 trace_create_file("trace_clock", 0644, d_tracer, tr,
6636 &trace_clock_fops);
6637
6638 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 6639 tr, &rb_simple_fops);
ce9bae55 6640
6d9b3fa5
SRRH
6641#ifdef CONFIG_TRACER_MAX_TRACE
6642 trace_create_file("tracing_max_latency", 0644, d_tracer,
6643 &tr->max_latency, &tracing_max_lat_fops);
6644#endif
6645
591dffda
SRRH
6646 if (ftrace_create_function_files(tr, d_tracer))
6647 WARN(1, "Could not allocate function filter files");
6648
ce9bae55
SRRH
6649#ifdef CONFIG_TRACER_SNAPSHOT
6650 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 6651 tr, &snapshot_fops);
ce9bae55 6652#endif
121aaee7
SRRH
6653
6654 for_each_tracing_cpu(cpu)
8434dc93 6655 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 6656
2b6080f2
SR
6657}
6658
f76180bc
SRRH
6659static struct vfsmount *trace_automount(void *ignore)
6660{
6661 struct vfsmount *mnt;
6662 struct file_system_type *type;
6663
6664 /*
6665 * To maintain backward compatibility for tools that mount
6666 * debugfs to get to the tracing facility, tracefs is automatically
6667 * mounted to the debugfs/tracing directory.
6668 */
6669 type = get_fs_type("tracefs");
6670 if (!type)
6671 return NULL;
6672 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6673 put_filesystem(type);
6674 if (IS_ERR(mnt))
6675 return NULL;
6676 mntget(mnt);
6677
6678 return mnt;
6679}
6680
7eeafbca
SRRH
6681/**
6682 * tracing_init_dentry - initialize top level trace array
6683 *
6684 * This is called when creating files or directories in the tracing
6685 * directory. It is called via fs_initcall() by any of the boot up code
6686 * and expects to return the dentry of the top level tracing directory.
6687 */
6688struct dentry *tracing_init_dentry(void)
6689{
6690 struct trace_array *tr = &global_trace;
6691
f76180bc 6692 /* The top level trace array uses NULL as parent */
7eeafbca 6693 if (tr->dir)
f76180bc 6694 return NULL;
7eeafbca
SRRH
6695
6696 if (WARN_ON(!debugfs_initialized()))
6697 return ERR_PTR(-ENODEV);
6698
f76180bc
SRRH
6699 /*
6700 * As there may still be users that expect the tracing
6701 * files to exist in debugfs/tracing, we must automount
6702 * the tracefs file system there, so older tools still
6703 * work with the newer kernel.
6704 */
6705 tr->dir = debugfs_create_automount("tracing", NULL,
6706 trace_automount, NULL);
7eeafbca
SRRH
6707 if (!tr->dir) {
6708 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6709 return ERR_PTR(-ENOMEM);
6710 }
6711
8434dc93 6712 return NULL;
7eeafbca
SRRH
6713}
6714
0c564a53
SRRH
6715extern struct trace_enum_map *__start_ftrace_enum_maps[];
6716extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6717
6718static void __init trace_enum_init(void)
6719{
3673b8e4
SRRH
6720 int len;
6721
6722 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
9828413d 6723 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
3673b8e4
SRRH
6724}
6725
6726#ifdef CONFIG_MODULES
6727static void trace_module_add_enums(struct module *mod)
6728{
6729 if (!mod->num_trace_enums)
6730 return;
6731
6732 /*
6733 * Modules with bad taint do not have events created; do
6734 * not bother with enums either.
6735 */
6736 if (trace_module_has_bad_taint(mod))
6737 return;
6738
9828413d 6739 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
3673b8e4
SRRH
6740}
6741
9828413d
SRRH
6742#ifdef CONFIG_TRACE_ENUM_MAP_FILE
6743static void trace_module_remove_enums(struct module *mod)
6744{
6745 union trace_enum_map_item *map;
6746 union trace_enum_map_item **last = &trace_enum_maps;
6747
6748 if (!mod->num_trace_enums)
6749 return;
6750
6751 mutex_lock(&trace_enum_mutex);
6752
6753 map = trace_enum_maps;
6754
6755 while (map) {
6756 if (map->head.mod == mod)
6757 break;
6758 map = trace_enum_jmp_to_tail(map);
6759 last = &map->tail.next;
6760 map = map->tail.next;
6761 }
6762 if (!map)
6763 goto out;
6764
6765 *last = trace_enum_jmp_to_tail(map)->tail.next;
6766 kfree(map);
6767 out:
6768 mutex_unlock(&trace_enum_mutex);
6769}
6770#else
6771static inline void trace_module_remove_enums(struct module *mod) { }
6772#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6773
3673b8e4
SRRH
6774static int trace_module_notify(struct notifier_block *self,
6775 unsigned long val, void *data)
6776{
6777 struct module *mod = data;
6778
6779 switch (val) {
6780 case MODULE_STATE_COMING:
6781 trace_module_add_enums(mod);
6782 break;
9828413d
SRRH
6783 case MODULE_STATE_GOING:
6784 trace_module_remove_enums(mod);
6785 break;
3673b8e4
SRRH
6786 }
6787
6788 return 0;
0c564a53
SRRH
6789}
6790
3673b8e4
SRRH
6791static struct notifier_block trace_module_nb = {
6792 .notifier_call = trace_module_notify,
6793 .priority = 0,
6794};
9828413d 6795#endif /* CONFIG_MODULES */
3673b8e4 6796
8434dc93 6797static __init int tracer_init_tracefs(void)
bc0c38d1
SR
6798{
6799 struct dentry *d_tracer;
41d9c0be 6800 struct tracer *t;
bc0c38d1 6801
7e53bd42
LJ
6802 trace_access_lock_init();
6803
bc0c38d1 6804 d_tracer = tracing_init_dentry();
14a5ae40 6805 if (IS_ERR(d_tracer))
ed6f1c99 6806 return 0;
bc0c38d1 6807
8434dc93 6808 init_tracer_tracefs(&global_trace, d_tracer);
bc0c38d1 6809
5452af66 6810 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 6811 &global_trace, &tracing_thresh_fops);
a8259075 6812
339ae5d3 6813 trace_create_file("README", 0444, d_tracer,
5452af66
FW
6814 NULL, &tracing_readme_fops);
6815
69abe6a5
AP
6816 trace_create_file("saved_cmdlines", 0444, d_tracer,
6817 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 6818
939c7a4f
YY
6819 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6820 NULL, &tracing_saved_cmdlines_size_fops);
6821
0c564a53
SRRH
6822 trace_enum_init();
6823
9828413d
SRRH
6824 trace_create_enum_file(d_tracer);
6825
3673b8e4
SRRH
6826#ifdef CONFIG_MODULES
6827 register_module_notifier(&trace_module_nb);
6828#endif
6829
bc0c38d1 6830#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
6831 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6832 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 6833#endif
b04cc6b1 6834
277ba044 6835 create_trace_instances(d_tracer);
5452af66 6836
2b6080f2 6837 create_trace_options_dir(&global_trace);
b04cc6b1 6838
41d9c0be
SRRH
6839 mutex_lock(&trace_types_lock);
6840 for (t = trace_types; t; t = t->next)
6841 add_tracer_options(&global_trace, t);
6842 mutex_unlock(&trace_types_lock);
09d23a1d 6843
b5ad384e 6844 return 0;
bc0c38d1
SR
6845}
6846
3f5a54e3
SR
6847static int trace_panic_handler(struct notifier_block *this,
6848 unsigned long event, void *unused)
6849{
944ac425 6850 if (ftrace_dump_on_oops)
cecbca96 6851 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6852 return NOTIFY_OK;
6853}
6854
6855static struct notifier_block trace_panic_notifier = {
6856 .notifier_call = trace_panic_handler,
6857 .next = NULL,
6858 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6859};
6860
6861static int trace_die_handler(struct notifier_block *self,
6862 unsigned long val,
6863 void *data)
6864{
6865 switch (val) {
6866 case DIE_OOPS:
944ac425 6867 if (ftrace_dump_on_oops)
cecbca96 6868 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6869 break;
6870 default:
6871 break;
6872 }
6873 return NOTIFY_OK;
6874}
6875
6876static struct notifier_block trace_die_notifier = {
6877 .notifier_call = trace_die_handler,
6878 .priority = 200
6879};
6880
6881/*
6882 * printk is set to a max of 1024; we really don't need it that big.
6883 * Nothing should be printing 1000 characters anyway.
6884 */
6885#define TRACE_MAX_PRINT 1000
6886
6887/*
6888 * Define here KERN_TRACE so that we have one place to modify
6889 * it if we decide to change what log level the ftrace dump
6890 * should be at.
6891 */
428aee14 6892#define KERN_TRACE KERN_EMERG
3f5a54e3 6893
955b61e5 6894void
3f5a54e3
SR
6895trace_printk_seq(struct trace_seq *s)
6896{
6897 /* Probably should print a warning here. */
3a161d99
SRRH
6898 if (s->seq.len >= TRACE_MAX_PRINT)
6899 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 6900
820b75f6
SRRH
6901 /*
6902 * More paranoid code. Although the buffer size is set to
6903 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
6904 * an extra layer of protection.
6905 */
6906 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
6907 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
6908
6909 /* should be zero ended, but we are paranoid. */
3a161d99 6910 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
6911
6912 printk(KERN_TRACE "%s", s->buffer);
6913
f9520750 6914 trace_seq_init(s);
3f5a54e3
SR
6915}
6916
955b61e5
JW
6917void trace_init_global_iter(struct trace_iterator *iter)
6918{
6919 iter->tr = &global_trace;
2b6080f2 6920 iter->trace = iter->tr->current_trace;
ae3b5093 6921 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 6922 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
6923
6924 if (iter->trace && iter->trace->open)
6925 iter->trace->open(iter);
6926
6927 /* Annotate start of buffers if we had overruns */
6928 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6929 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6930
6931 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6932 if (trace_clocks[iter->tr->clock_id].in_ns)
6933 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
6934}
6935
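/*
 * Dump the ring buffer contents to the console. Used from the panic
 * and die notifiers and from sysrq-z; only one dump may run at a
 * time, and tracing is turned off first (re-enable with tracing_on).
 */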
7fe70b57 6936void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 6937{
3f5a54e3
SR
6938 /* use static because iter can be a bit big for the stack */
6939 static struct trace_iterator iter;
7fe70b57 6940 static atomic_t dump_running;
cf586b61 6941 unsigned int old_userobj;
d769041f
SR
6942 unsigned long flags;
6943 int cnt = 0, cpu;
3f5a54e3 6944
7fe70b57
SRRH
6945 /* Only allow one dump user at a time. */
6946 if (atomic_inc_return(&dump_running) != 1) {
6947 atomic_dec(&dump_running);
6948 return;
6949 }
3f5a54e3 6950
7fe70b57
SRRH
6951 /*
6952 * Always turn off tracing when we dump.
6953 * We don't need to show trace output of what happens
6954 * between multiple crashes.
6955 *
6956 * If the user does a sysrq-z, then they can re-enable
6957 * tracing with echo 1 > tracing_on.
6958 */
0ee6b6cf 6959 tracing_off();
cf586b61 6960
7fe70b57 6961 local_irq_save(flags);
3f5a54e3 6962
38dbe0b1 6963 /* Simulate the iterator */
955b61e5
JW
6964 trace_init_global_iter(&iter);
6965
d769041f 6966 for_each_tracing_cpu(cpu) {
5e2d5ef8 6967 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
d769041f
SR
6968 }
6969
cf586b61
FW
6970 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6971
b54d3de9
TE
6972 /* don't look at user memory in panic mode */
6973 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6974
cecbca96
FW
6975 switch (oops_dump_mode) {
6976 case DUMP_ALL:
ae3b5093 6977 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
6978 break;
6979 case DUMP_ORIG:
6980 iter.cpu_file = raw_smp_processor_id();
6981 break;
6982 case DUMP_NONE:
6983 goto out_enable;
6984 default:
6985 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 6986 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
6987 }
6988
6989 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 6990
7fe70b57
SRRH
6991 /* Did function tracer already get disabled? */
6992 if (ftrace_is_dead()) {
6993 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6994 printk("# MAY BE MISSING FUNCTION EVENTS\n");
6995 }
6996
3f5a54e3
SR
6997 /*
6998 * We need to stop all tracing on all CPUs to read
6999 * the next buffer. This is a bit expensive, but is
7000 * not done often. We fill all that we can read,
7001 * and then release the locks again.
7002 */
7003
3f5a54e3
SR
7004 while (!trace_empty(&iter)) {
7005
7006 if (!cnt)
7007 printk(KERN_TRACE "---------------------------------\n");
7008
7009 cnt++;
7010
7011 /* reset all but tr, trace, and overruns */
7012 memset(&iter.seq, 0,
7013 sizeof(struct trace_iterator) -
7014 offsetof(struct trace_iterator, seq));
7015 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7016 iter.pos = -1;
7017
955b61e5 7018 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
7019 int ret;
7020
7021 ret = print_trace_line(&iter);
7022 if (ret != TRACE_TYPE_NO_CONSUME)
7023 trace_consume(&iter);
3f5a54e3 7024 }
b892e5c8 7025 touch_nmi_watchdog();
3f5a54e3
SR
7026
7027 trace_printk_seq(&iter.seq);
7028 }
7029
7030 if (!cnt)
7031 printk(KERN_TRACE " (ftrace buffer empty)\n");
7032 else
7033 printk(KERN_TRACE "---------------------------------\n");
7034
cecbca96 7035 out_enable:
7fe70b57 7036 trace_flags |= old_userobj;
cf586b61 7037
7fe70b57
SRRH
7038 for_each_tracing_cpu(cpu) {
7039 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 7040 }
7fe70b57 7041 atomic_dec(&dump_running);
cd891ae0 7042 local_irq_restore(flags);
3f5a54e3 7043}
a8eecf22 7044EXPORT_SYMBOL_GPL(ftrace_dump);
cf586b61 7045
3928a8a2 7046__init static int tracer_alloc_buffers(void)
bc0c38d1 7047{
73c5162a 7048 int ring_buf_size;
9e01c1b7 7049 int ret = -ENOMEM;
4c11d7ae 7050
b5e87c05
SRRH
7051 /*
7052 * Make sure we don't accidentally add more trace options
7053 * than we have bits for.
7054 */
7055 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > 32);
7056
9e01c1b7
RR
7057 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7058 goto out;
7059
ccfe9e42 7060 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 7061 goto out_free_buffer_mask;
4c11d7ae 7062
07d777fe
SR
7063 /* Only allocate trace_printk buffers if a trace_printk exists */
7064 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 7065 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
7066 trace_printk_init_buffers();
7067
73c5162a
SR
7068 /* To save memory, keep the ring buffer size to its minimum */
7069 if (ring_buffer_expanded)
7070 ring_buf_size = trace_buf_size;
7071 else
7072 ring_buf_size = 1;
7073
9e01c1b7 7074 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 7075 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 7076
2b6080f2
SR
7077 raw_spin_lock_init(&global_trace.start_lock);
7078
2c4a33ab
SRRH
7079 /* Used for event triggers */
7080 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7081 if (!temp_buffer)
7082 goto out_free_cpumask;
7083
939c7a4f
YY
7084 if (trace_create_savedcmd() < 0)
7085 goto out_free_temp_buffer;
7086
9e01c1b7 7087 /* TODO: make the number of buffers hot pluggable with CPUS */
737223fb 7088 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
7089 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7090 WARN_ON(1);
939c7a4f 7091 goto out_free_savedcmd;
4c11d7ae 7092 }
a7603ff4 7093
499e5470
SR
7094 if (global_trace.buffer_disabled)
7095 tracing_off();
4c11d7ae 7096
e1e232ca
SR
7097 if (trace_boot_clock) {
7098 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7099 if (ret < 0)
7100 pr_warning("Trace clock %s not defined, going back to default\n",
7101 trace_boot_clock);
7102 }
7103
ca164318
SRRH
7104 /*
7105 * register_tracer() might reference current_trace, so it
7106 * needs to be set before we register anything. This is
7107 * just a bootstrap of current_trace anyway.
7108 */
2b6080f2
SR
7109 global_trace.current_trace = &nop_trace;
7110
0b9b12c1
SRRH
7111 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7112
4104d326
SRRH
7113 ftrace_init_global_array_ops(&global_trace);
7114
ca164318
SRRH
7115 register_tracer(&nop_trace);
7116
60a11774
SR
7117 /* All seems OK, enable tracing */
7118 tracing_disabled = 0;
3928a8a2 7119
3f5a54e3
SR
7120 atomic_notifier_chain_register(&panic_notifier_list,
7121 &trace_panic_notifier);
7122
7123 register_die_notifier(&trace_die_notifier);
2fc1dfbe 7124
ae63b31e
SR
7125 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7126
7127 INIT_LIST_HEAD(&global_trace.systems);
7128 INIT_LIST_HEAD(&global_trace.events);
7129 list_add(&global_trace.list, &ftrace_trace_arrays);
7130
7bcfaf54
SR
7131 while (trace_boot_options) {
7132 char *option;
7133
7134 option = strsep(&trace_boot_options, ",");
2b6080f2 7135 trace_set_options(&global_trace, option);
7bcfaf54
SR
7136 }
7137
77fd5c15
SRRH
7138 register_snapshot_cmd();
7139
2fc1dfbe 7140 return 0;
3f5a54e3 7141
939c7a4f
YY
7142out_free_savedcmd:
7143 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
7144out_free_temp_buffer:
7145 ring_buffer_free(temp_buffer);
9e01c1b7 7146out_free_cpumask:
ccfe9e42 7147 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
7148out_free_buffer_mask:
7149 free_cpumask_var(tracing_buffer_mask);
7150out:
7151 return ret;
bc0c38d1 7152}
b2821ae6 7153
5f893b26
SRRH
7154void __init trace_init(void)
7155{
0daa2302
SRRH
7156 if (tracepoint_printk) {
7157 tracepoint_print_iter =
7158 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7159 if (WARN_ON(!tracepoint_print_iter))
7160 tracepoint_printk = 0;
7161 }
5f893b26 7162 tracer_alloc_buffers();
0c564a53 7163 trace_event_init();
5f893b26
SRRH
7164}
7165
b2821ae6
SR
7166__init static int clear_boot_tracer(void)
7167{
7168 /*
7169 * The default bootup tracer buffer is in an init section.
7170 * This function is called from a late initcall. If we did not
7171 * find the boot tracer, then clear it out, to prevent
7172 * later registration from accessing the buffer that is
7173 * about to be freed.
7174 */
7175 if (!default_bootup_tracer)
7176 return 0;
7177
7178 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7179 default_bootup_tracer);
7180 default_bootup_tracer = NULL;
7181
7182 return 0;
7183}
7184
8434dc93 7185fs_initcall(tracer_init_tracefs);
b2821ae6 7186late_initcall(clear_boot_tracer);